// Required for old g++ versions to compile the PRId64 macros; see
// https://github.com/pytorch/pytorch/issues/3571 for context.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// An external backend might generate this file within its own code tree and
// run clang-format over every source file in that tree, so disable
// clang-format here: the backend might use a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries; it
//       only excludes external projects such as torch_xla, which
//       reuse some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB)        || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB)    || \
    defined(TORCH_HIP_BUILD_MAIN_LIB)     || \
    defined(TORCH_XPU_BUILD_MAIN_LIB)     || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif
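
// When TORCH_ASSERT_ONLY_METHOD_OPERATORS is defined, the umbrella headers
// (e.g. <ATen/Functions.h>) refuse to compile, so this file can only depend
// on the per-operator <ATen/ops/*.h> headers included below, which keeps
// incremental rebuilds small when individual operators change.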

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <optional>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/ops/as_strided_native.h>
#include <ATen/EmptyTensor.h>
#include <c10/macros/Macros.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_add_relu_cpu_dispatch.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_addmm_activation_cpu_dispatch.h>
#include <ATen/ops/_addmm_activation_native.h>
#include <ATen/ops/_aminmax_cpu_dispatch.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cpu_dispatch.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_update_scale_cpu_dispatch.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_assert_async_cpu_dispatch.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_batch_norm_with_update_cpu_dispatch.h>
#include <ATen/ops/_batch_norm_with_update_native.h>
#include <ATen/ops/_cdist_backward_cpu_dispatch.h>
#include <ATen/ops/_cdist_backward_native.h>
#include <ATen/ops/_cdist_forward_cpu_dispatch.h>
#include <ATen/ops/_cdist_forward_native.h>
#include <ATen/ops/_cholesky_solve_helper_cpu_dispatch.h>
#include <ATen/ops/_cholesky_solve_helper_native.h>
#include <ATen/ops/_compute_linear_combination_cpu_dispatch.h>
#include <ATen/ops/_compute_linear_combination_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_cpu_dispatch.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_native.h>
#include <ATen/ops/_ctc_loss_backward_cpu_dispatch.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_ctc_loss_cpu_dispatch.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_cummax_helper_cpu_dispatch.h>
#include <ATen/ops/_cummax_helper_native.h>
#include <ATen/ops/_cummin_helper_cpu_dispatch.h>
#include <ATen/ops/_cummin_helper_native.h>
#include <ATen/ops/_dirichlet_grad_cpu_dispatch.h>
#include <ATen/ops/_dirichlet_grad_native.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_cpu_dispatch.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_native.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_cpu_dispatch.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_native.h>
#include <ATen/ops/_efficientzerotensor_cpu_dispatch.h>
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/_embedding_bag_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_backward_native.h>
#include <ATen/ops/_embedding_bag_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_dense_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#include <ATen/ops/_empty_affine_quantized_cpu_dispatch.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
#include <ATen/ops/_fft_c2c_cpu_dispatch.h>
#include <ATen/ops/_fft_c2c_native.h>
#include <ATen/ops/_fft_c2r_cpu_dispatch.h>
#include <ATen/ops/_fft_c2r_native.h>
#include <ATen/ops/_fft_r2c_cpu_dispatch.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_foobar_cpu_dispatch.h>
#include <ATen/ops/_foobar_native.h>
#include <ATen/ops/_functional_assert_async_cpu_dispatch.h>
#include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_fused_adagrad_cpu_dispatch.h>
#include <ATen/ops/_fused_adagrad_native.h>
#include <ATen/ops/_fused_adam_cpu_dispatch.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adamw_cpu_dispatch.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_cpu_dispatch.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_sdp_choice_cpu_dispatch.h>
#include <ATen/ops/_fused_sdp_choice_native.h>
#include <ATen/ops/_fused_sgd_cpu_dispatch.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_histogramdd_bin_edges_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_index_put_impl_cpu_dispatch.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_int_mm_cpu_dispatch.h>
#include <ATen/ops/_int_mm_native.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_cpu_dispatch.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_native.h>
#include <ATen/ops/_linalg_det_cpu_dispatch.h>
#include <ATen/ops/_linalg_det_native.h>
#include <ATen/ops/_linalg_eigh_cpu_dispatch.h>
#include <ATen/ops/_linalg_eigh_native.h>
#include <ATen/ops/_linalg_eigvals_cpu_dispatch.h>
#include <ATen/ops/_linalg_eigvals_native.h>
#include <ATen/ops/_linalg_slogdet_cpu_dispatch.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_solve_ex_cpu_dispatch.h>
#include <ATen/ops/_linalg_solve_ex_native.h>
#include <ATen/ops/_linalg_svd_cpu_dispatch.h>
#include <ATen/ops/_linalg_svd_native.h>
#include <ATen/ops/_local_scalar_dense_cpu_dispatch.h>
#include <ATen/ops/_local_scalar_dense_native.h>
#include <ATen/ops/_log_softmax_backward_data_cpu_dispatch.h>
#include <ATen/ops/_log_softmax_backward_data_native.h>
#include <ATen/ops/_log_softmax_cpu_dispatch.h>
#include <ATen/ops/_log_softmax_native.h>
#include <ATen/ops/_logcumsumexp_cpu_dispatch.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_make_dep_token_cpu_dispatch.h>
#include <ATen/ops/_make_dep_token_native.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/_masked_softmax_backward_cpu_dispatch.h>
#include <ATen/ops/_masked_softmax_backward_native.h>
#include <ATen/ops/_masked_softmax_cpu_dispatch.h>
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_native_batch_norm_legit_cpu_dispatch.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_multi_head_attention_cpu_dispatch.h>
#include <ATen/ops/_native_multi_head_attention_native.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_cpu_dispatch.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_native.h>
#include <ATen/ops/_nested_from_padded_cpu_dispatch.h>
#include <ATen/ops/_nested_from_padded_native.h>
#include <ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_cpu_dispatch.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_view_from_buffer_cpu_dispatch.h>
#include <ATen/ops/_nested_view_from_buffer_native.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_cpu_dispatch.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_native.h>
#include <ATen/ops/_pdist_backward_cpu_dispatch.h>
#include <ATen/ops/_pdist_backward_native.h>
#include <ATen/ops/_pdist_forward_cpu_dispatch.h>
#include <ATen/ops/_pdist_forward_native.h>
#include <ATen/ops/_prelu_kernel_backward_cpu_dispatch.h>
#include <ATen/ops/_prelu_kernel_backward_native.h>
#include <ATen/ops/_prelu_kernel_cpu_dispatch.h>
#include <ATen/ops/_prelu_kernel_native.h>
#include <ATen/ops/_reshape_alias_cpu_dispatch.h>
#include <ATen/ops/_reshape_alias_native.h>
#include <ATen/ops/_sample_dirichlet_cpu_dispatch.h>
#include <ATen/ops/_sample_dirichlet_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_cpu_dispatch.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h>
#include <ATen/ops/_segment_reduce_backward_cpu_dispatch.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_slow_conv2d_backward_cpu_dispatch.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_forward_cpu_dispatch.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_softmax_backward_data_cpu_dispatch.h>
#include <ATen/ops/_softmax_backward_data_native.h>
#include <ATen/ops/_softmax_cpu_dispatch.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_spdiags_cpu_dispatch.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_stack_cpu_dispatch.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_standard_gamma_cpu_dispatch.h>
#include <ATen/ops/_standard_gamma_grad_cpu_dispatch.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_standard_gamma_native.h>
#include <ATen/ops/_test_functorch_fallback_cpu_dispatch.h>
#include <ATen/ops/_test_functorch_fallback_native.h>
#include <ATen/ops/_test_optional_filled_intlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_filled_intlist_native.h>
#include <ATen/ops/_test_optional_floatlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_intlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_intlist_native.h>
#include <ATen/ops/_to_sparse_bsc_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsr_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csc_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csr_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h>
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
#include <ATen/ops/_unique2_cpu_dispatch.h>
#include <ATen/ops/_unique2_native.h>
#include <ATen/ops/_unique_cpu_dispatch.h>
#include <ATen/ops/_unique_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_cpu_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h>
#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_native.h>
#include <ATen/ops/_weight_int8pack_mm_cpu_dispatch.h>
#include <ATen/ops/_weight_int8pack_mm_native.h>
#include <ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h>
#include <ATen/ops/_weight_norm_interface_backward_native.h>
#include <ATen/ops/_weight_norm_interface_cpu_dispatch.h>
#include <ATen/ops/_weight_norm_interface_native.h>
#include <ATen/ops/abs_cpu_dispatch.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/acos_cpu_dispatch.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acosh_cpu_dispatch.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_native.h>
#include <ATen/ops/add_cpu_dispatch.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addbmm_cpu_dispatch.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addcdiv_cpu_dispatch.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcmul_cpu_dispatch.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addmm_cpu_dispatch.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmv_cpu_dispatch.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addr_cpu_dispatch.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/all_cpu_dispatch.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/amax_cpu_dispatch.h>
#include <ATen/ops/amax_native.h>
#include <ATen/ops/amin_cpu_dispatch.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/aminmax_cpu_dispatch.h>
#include <ATen/ops/aminmax_native.h>
#include <ATen/ops/angle_cpu_dispatch.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/any_cpu_dispatch.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/arange_cpu_dispatch.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/argmax_cpu_dispatch.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmin_cpu_dispatch.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/as_strided_cpu_dispatch.h>
#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/asin_cpu_dispatch.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh_cpu_dispatch.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan2_cpu_dispatch.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan_cpu_dispatch.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atanh_cpu_dispatch.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/avg_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/avg_pool2d_backward_native.h>
#include <ATen/ops/avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/baddbmm_cpu_dispatch.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/batch_norm_backward_cpu_dispatch.h>
#include <ATen/ops/batch_norm_backward_native.h>
#include <ATen/ops/batch_norm_update_stats_cpu_dispatch.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/bernoulli_cpu_dispatch.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/binary_cross_entropy_backward_cpu_dispatch.h>
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_cpu_dispatch.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/bincount_cpu_dispatch.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/binomial_cpu_dispatch.h>
#include <ATen/ops/binomial_native.h>
#include <ATen/ops/bitwise_and_cpu_dispatch.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_left_shift_cpu_dispatch.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_not_cpu_dispatch.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_or_cpu_dispatch.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_right_shift_cpu_dispatch.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_xor_cpu_dispatch.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bmm_cpu_dispatch.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/bucketize_cpu_dispatch.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/cat_cpu_dispatch.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cauchy_cpu_dispatch.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/ceil_cpu_dispatch.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/channel_shuffle_cpu_dispatch.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/cholesky_cpu_dispatch.h>
#include <ATen/ops/cholesky_inverse_cpu_dispatch.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_native.h>
#include <ATen/ops/clamp_cpu_dispatch.h>
#include <ATen/ops/clamp_max_cpu_dispatch.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_min_cpu_dispatch.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/col2im_cpu_dispatch.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/complex_cpu_dispatch.h>
#include <ATen/ops/complex_native.h>
#include <ATen/ops/conj_physical_cpu_dispatch.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/copysign_cpu_dispatch.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/cos_cpu_dispatch.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cosh_cpu_dispatch.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/count_nonzero_cpu_dispatch.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/cumprod_cpu_dispatch.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumsum_cpu_dispatch.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/dequantize_cpu_dispatch.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/digamma_cpu_dispatch.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/div_cpu_dispatch.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/dot_cpu_dispatch.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/elu_backward_cpu_dispatch.h>
#include <ATen/ops/elu_backward_native.h>
#include <ATen/ops/elu_cpu_dispatch.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/embedding_dense_backward_cpu_dispatch.h>
#include <ATen/ops/embedding_dense_backward_native.h>
#include <ATen/ops/embedding_renorm_cpu_dispatch.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/empty_cpu_dispatch.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_strided_cpu_dispatch.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/eq_cpu_dispatch.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/equal_cpu_dispatch.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/erf_cpu_dispatch.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfc_cpu_dispatch.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfinv_cpu_dispatch.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/exp2_cpu_dispatch.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/exp_cpu_dispatch.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/expm1_cpu_dispatch.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/exponential_cpu_dispatch.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/eye_cpu_dispatch.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cpu_dispatch.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
#include <ATen/ops/fill_cpu_dispatch.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/flip_cpu_dispatch.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/floor_cpu_dispatch.h>
#include <ATen/ops/floor_divide_cpu_dispatch.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/fmax_cpu_dispatch.h>
#include <ATen/ops/fmax_native.h>
#include <ATen/ops/fmin_cpu_dispatch.h>
#include <ATen/ops/fmin_native.h>
#include <ATen/ops/fmod_cpu_dispatch.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/frac_cpu_dispatch.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
#include <ATen/ops/fractional_max_pool3d_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/frexp_cpu_dispatch.h>
#include <ATen/ops/frexp_native.h>
#include <ATen/ops/from_file_cpu_dispatch.h>
#include <ATen/ops/from_file_native.h>
#include <ATen/ops/gather_cpu_dispatch.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gcd_cpu_dispatch.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/ge_cpu_dispatch.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/gelu_backward_cpu_dispatch.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/gelu_cpu_dispatch.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/geometric_cpu_dispatch.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geqrf_cpu_dispatch.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/glu_backward_cpu_dispatch.h>
#include <ATen/ops/glu_backward_jvp_cpu_dispatch.h>
#include <ATen/ops/glu_backward_jvp_native.h>
#include <ATen/ops/glu_backward_native.h>
#include <ATen/ops/glu_cpu_dispatch.h>
#include <ATen/ops/glu_jvp_cpu_dispatch.h>
#include <ATen/ops/glu_jvp_native.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_2d_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_3d_backward_native.h>
#include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_3d_native.h>
#include <ATen/ops/gt_cpu_dispatch.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/hardshrink_backward_cpu_dispatch.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardshrink_cpu_dispatch.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardsigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/hardsigmoid_backward_native.h>
#include <ATen/ops/hardsigmoid_cpu_dispatch.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardswish_backward_cpu_dispatch.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardswish_cpu_dispatch.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardtanh_backward_cpu_dispatch.h>
#include <ATen/ops/hardtanh_backward_native.h>
#include <ATen/ops/hardtanh_cpu_dispatch.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/heaviside_cpu_dispatch.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/histc_cpu_dispatch.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histogram_cpu_dispatch.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/huber_loss_backward_cpu_dispatch.h>
#include <ATen/ops/huber_loss_backward_native.h>
#include <ATen/ops/huber_loss_cpu_dispatch.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/hypot_cpu_dispatch.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/i0_cpu_dispatch.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/igamma_cpu_dispatch.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igammac_cpu_dispatch.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/im2col_cpu_dispatch.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/index_add_cpu_dispatch.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_copy_cpu_dispatch.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_cpu_dispatch.h>
#include <ATen/ops/index_fill_cpu_dispatch.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_reduce_cpu_dispatch.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_select_cpu_dispatch.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/is_set_to_cpu_dispatch.h>
#include <ATen/ops/is_set_to_native.h>
#include <ATen/ops/isin_cpu_dispatch.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isnan_cpu_dispatch.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf_cpu_dispatch.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf_cpu_dispatch.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/kthvalue_cpu_dispatch.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/lcm_cpu_dispatch.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/le_cpu_dispatch.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/leaky_relu_backward_cpu_dispatch.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/leaky_relu_cpu_dispatch.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/lerp_cpu_dispatch.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lgamma_cpu_dispatch.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/linalg_cholesky_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cross_cpu_dispatch.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_eig_cpu_dispatch.h>
#include <ATen/ops/linalg_eig_native.h>
#include <ATen/ops/linalg_eigvals_cpu_dispatch.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_householder_product_cpu_dispatch.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_inv_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
#include <ATen/ops/linalg_ldl_solve_cpu_dispatch.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_lstsq_cpu_dispatch.h>
#include <ATen/ops/linalg_lstsq_native.h>
#include <ATen/ops/linalg_lu_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_native.h>
#include <ATen/ops/linalg_lu_solve_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_matrix_exp_cpu_dispatch.h>
#include <ATen/ops/linalg_matrix_exp_native.h>
#include <ATen/ops/linalg_qr_cpu_dispatch.h>
#include <ATen/ops/linalg_qr_native.h>
#include <ATen/ops/linalg_solve_triangular_cpu_dispatch.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_vector_norm_cpu_dispatch.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linspace_cpu_dispatch.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/log10_cpu_dispatch.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log1p_cpu_dispatch.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log2_cpu_dispatch.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log_cpu_dispatch.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log_normal_cpu_dispatch.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_sigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/log_sigmoid_backward_native.h>
#include <ATen/ops/log_sigmoid_forward_cpu_dispatch.h>
#include <ATen/ops/log_sigmoid_forward_native.h>
#include <ATen/ops/logaddexp2_cpu_dispatch.h>
#include <ATen/ops/logaddexp2_native.h>
#include <ATen/ops/logaddexp_cpu_dispatch.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logical_and_cpu_dispatch.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_not_cpu_dispatch.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_or_cpu_dispatch.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_xor_cpu_dispatch.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logit_backward_cpu_dispatch.h>
#include <ATen/ops/logit_backward_native.h>
#include <ATen/ops/logit_cpu_dispatch.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logspace_cpu_dispatch.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/lshift_cpu_dispatch.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lt_cpu_dispatch.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lu_unpack_cpu_dispatch.h>
#include <ATen/ops/lu_unpack_native.h>
#include <ATen/ops/masked_fill_cpu_dispatch.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_scatter_cpu_dispatch.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_select_cpu_dispatch.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/max_cpu_dispatch.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_cpu_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/max_pool3d_with_indices_cpu_dispatch.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_unpool2d_cpu_dispatch.h>
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_cpu_dispatch.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/maximum_cpu_dispatch.h>
#include <ATen/ops/maximum_native.h>
#include <ATen/ops/mean_cpu_dispatch.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/median_cpu_dispatch.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/min_cpu_dispatch.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/minimum_cpu_dispatch.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/mish_backward_cpu_dispatch.h>
#include <ATen/ops/mish_backward_native.h>
#include <ATen/ops/mish_cpu_dispatch.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_cpu_dispatch.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mkldnn_rnn_layer_cpu_dispatch.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mm_cpu_dispatch.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mode_cpu_dispatch.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mse_loss_backward_cpu_dispatch.h>
#include <ATen/ops/mse_loss_backward_native.h>
#include <ATen/ops/mse_loss_cpu_dispatch.h>
#include <ATen/ops/mse_loss_native.h>
#include <ATen/ops/mul_cpu_dispatch.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/multi_margin_loss_backward_cpu_dispatch.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multi_margin_loss_cpu_dispatch.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_cpu_dispatch.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_forward_cpu_dispatch.h>
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
#include <ATen/ops/multinomial_cpu_dispatch.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/mvlgamma_cpu_dispatch.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/nan_to_num_cpu_dispatch.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nanmedian_cpu_dispatch.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nansum_cpu_dispatch.h>
#include <ATen/ops/nansum_native.h>
#include <ATen/ops/narrow_copy_cpu_dispatch.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/native_batch_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_cpu_dispatch.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_channel_shuffle_cpu_dispatch.h>
#include <ATen/ops/native_channel_shuffle_native.h>
#include <ATen/ops/native_dropout_backward_cpu_dispatch.h>
#include <ATen/ops/native_dropout_backward_native.h>
#include <ATen/ops/native_dropout_cpu_dispatch.h>
#include <ATen/ops/native_dropout_native.h>
#include <ATen/ops/native_group_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_group_norm_cpu_dispatch.h>
#include <ATen/ops/native_group_norm_native.h>
#include <ATen/ops/native_layer_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_cpu_dispatch.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/ne_cpu_dispatch.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/neg_cpu_dispatch.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/nextafter_cpu_dispatch.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nll_loss2d_backward_cpu_dispatch.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_forward_cpu_dispatch.h>
#include <ATen/ops/nll_loss2d_forward_native.h>
#include <ATen/ops/nll_loss_backward_cpu_dispatch.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_cpu_dispatch.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nonzero_cpu_dispatch.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_static_cpu_dispatch.h>
#include <ATen/ops/nonzero_static_native.h>
#include <ATen/ops/norm_cpu_dispatch.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/normal_cpu_dispatch.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/ormqr_cpu_dispatch.h>
#include <ATen/ops/ormqr_native.h>
#include <ATen/ops/pixel_shuffle_cpu_dispatch.h>
#include <ATen/ops/pixel_shuffle_native.h>
#include <ATen/ops/pixel_unshuffle_cpu_dispatch.h>
#include <ATen/ops/pixel_unshuffle_native.h>
#include <ATen/ops/poisson_cpu_dispatch.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/polar_cpu_dispatch.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polygamma_cpu_dispatch.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/pow_cpu_dispatch.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/prod_cpu_dispatch.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/put_cpu_dispatch.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/quantize_per_channel_cpu_dispatch.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/quantize_per_tensor_cpu_dispatch.h>
#include <ATen/ops/quantize_per_tensor_dynamic_cpu_dispatch.h>
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/random_cpu_dispatch.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/randperm_cpu_dispatch.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/range_cpu_dispatch.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/reciprocal_cpu_dispatch.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reflection_pad1d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad2d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad2d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad3d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/relu_cpu_dispatch.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/remainder_cpu_dispatch.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/renorm_cpu_dispatch.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/repeat_interleave_cpu_dispatch.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/replication_pad1d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_cpu_dispatch.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad2d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_cpu_dispatch.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad3d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_cpu_dispatch.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/resize_cpu_dispatch.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/roll_cpu_dispatch.h>
#include <ATen/ops/roll_native.h>
#include <ATen/ops/round_cpu_dispatch.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/rrelu_with_noise_cpu_dispatch.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rshift_cpu_dispatch.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rsqrt_cpu_dispatch.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsub_cpu_dispatch.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/scatter_add_cpu_dispatch.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_cpu_dispatch.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_reduce_cpu_dispatch.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/searchsorted_cpu_dispatch.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/segment_reduce_cpu_dispatch.h>
#include <ATen/ops/segment_reduce_native.h>
#include <ATen/ops/set_cpu_dispatch.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/sgn_cpu_dispatch.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/sigmoid_backward_native.h>
#include <ATen/ops/sigmoid_cpu_dispatch.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sign_cpu_dispatch.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/signbit_cpu_dispatch.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/silu_backward_cpu_dispatch.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/silu_cpu_dispatch.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/sin_cpu_dispatch.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sinc_cpu_dispatch.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinh_cpu_dispatch.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/slow_conv3d_forward_cpu_dispatch.h>
#include <ATen/ops/slow_conv3d_forward_native.h>
#include <ATen/ops/slow_conv_dilated2d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_dilated2d_native.h>
#include <ATen/ops/slow_conv_dilated3d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_dilated3d_native.h>
#include <ATen/ops/slow_conv_transpose2d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#include <ATen/ops/slow_conv_transpose3d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#include <ATen/ops/smooth_l1_loss_backward_cpu_dispatch.h>
#include <ATen/ops/smooth_l1_loss_backward_native.h>
#include <ATen/ops/smooth_l1_loss_cpu_dispatch.h>
#include <ATen/ops/smooth_l1_loss_native.h>
#include <ATen/ops/softplus_backward_cpu_dispatch.h>
#include <ATen/ops/softplus_backward_native.h>
#include <ATen/ops/softplus_cpu_dispatch.h>
#include <ATen/ops/softplus_native.h>
#include <ATen/ops/softshrink_backward_cpu_dispatch.h>
#include <ATen/ops/softshrink_backward_native.h>
#include <ATen/ops/softshrink_cpu_dispatch.h>
#include <ATen/ops/softshrink_native.h>
#include <ATen/ops/sort_cpu_dispatch.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/special_airy_ai_cpu_dispatch.h>
#include <ATen/ops/special_airy_ai_native.h>
#include <ATen/ops/special_bessel_j0_cpu_dispatch.h>
#include <ATen/ops/special_bessel_j0_native.h>
#include <ATen/ops/special_bessel_j1_cpu_dispatch.h>
#include <ATen/ops/special_bessel_j1_native.h>
#include <ATen/ops/special_bessel_y0_cpu_dispatch.h>
#include <ATen/ops/special_bessel_y0_native.h>
#include <ATen/ops/special_bessel_y1_cpu_dispatch.h>
#include <ATen/ops/special_bessel_y1_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_entr_cpu_dispatch.h>
#include <ATen/ops/special_entr_native.h>
#include <ATen/ops/special_erfcx_cpu_dispatch.h>
#include <ATen/ops/special_erfcx_native.h>
#include <ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_he_cpu_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_i0e_cpu_dispatch.h>
#include <ATen/ops/special_i0e_native.h>
#include <ATen/ops/special_i1_cpu_dispatch.h>
#include <ATen/ops/special_i1_native.h>
#include <ATen/ops/special_i1e_cpu_dispatch.h>
#include <ATen/ops/special_i1e_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_cpu_dispatch.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_log_ndtr_cpu_dispatch.h>
#include <ATen/ops/special_log_ndtr_native.h>
#include <ATen/ops/special_modified_bessel_i0_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_i0_native.h>
#include <ATen/ops/special_modified_bessel_i1_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_i1_native.h>
#include <ATen/ops/special_modified_bessel_k0_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_k0_native.h>
#include <ATen/ops/special_modified_bessel_k1_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_k1_native.h>
#include <ATen/ops/special_ndtri_cpu_dispatch.h>
#include <ATen/ops/special_ndtri_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_cpu_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_spherical_bessel_j0_cpu_dispatch.h>
#include <ATen/ops/special_spherical_bessel_j0_native.h>
#include <ATen/ops/special_xlog1py_cpu_dispatch.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_zeta_cpu_dispatch.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/sqrt_cpu_dispatch.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sspaddmm_cpu_dispatch.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/std_cpu_dispatch.h>
#include <ATen/ops/std_mean_cpu_dispatch.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/sub_cpu_dispatch.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sum_cpu_dispatch.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/take_cpu_dispatch.h>
#include <ATen/ops/take_native.h>
#include <ATen/ops/tan_cpu_dispatch.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tanh_backward_cpu_dispatch.h>
#include <ATen/ops/tanh_backward_native.h>
#include <ATen/ops/tanh_cpu_dispatch.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_backward_cpu_dispatch.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/threshold_cpu_dispatch.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/to_mkldnn_cpu_dispatch.h>
#include <ATen/ops/to_mkldnn_native.h>
#include <ATen/ops/topk_cpu_dispatch.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/trace_cpu_dispatch.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/triangular_solve_cpu_dispatch.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/tril_cpu_dispatch.h>
#include <ATen/ops/tril_indices_cpu_dispatch.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_cpu_dispatch.h>
#include <ATen/ops/triu_indices_cpu_dispatch.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/trunc_cpu_dispatch.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/unfold_backward_cpu_dispatch.h>
#include <ATen/ops/unfold_backward_native.h>
#include <ATen/ops/unfold_cpu_dispatch.h>
#include <ATen/ops/unfold_native.h>
#include <ATen/ops/uniform_cpu_dispatch.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/unique_consecutive_cpu_dispatch.h>
#include <ATen/ops/unique_consecutive_native.h>
#include <ATen/ops/unique_dim_consecutive_cpu_dispatch.h>
#include <ATen/ops/unique_dim_consecutive_native.h>
#include <ATen/ops/unique_dim_cpu_dispatch.h>
#include <ATen/ops/unique_dim_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#include <ATen/ops/upsample_bicubic2d_cpu_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bilinear2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
#include <ATen/ops/upsample_bilinear2d_cpu_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_linear1d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_linear1d_backward_native.h>
#include <ATen/ops/upsample_linear1d_cpu_dispatch.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest1d_backward_native.h>
#include <ATen/ops/upsample_nearest1d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest2d_backward_native.h>
#include <ATen/ops/upsample_nearest2d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/upsample_nearest3d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
#include <ATen/ops/upsample_trilinear3d_cpu_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/var_cpu_dispatch.h>
#include <ATen/ops/var_mean_cpu_dispatch.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/vdot_cpu_dispatch.h>
#include <ATen/ops/vdot_native.h>
#include <ATen/ops/view_as_complex_cpu_dispatch.h>
#include <ATen/ops/view_as_complex_native.h>
#include <ATen/ops/view_as_real_cpu_dispatch.h>
#include <ATen/ops/view_as_real_native.h>
#include <ATen/ops/view_cpu_dispatch.h>
#include <ATen/ops/view_native.h>
#include <ATen/ops/where_cpu_dispatch.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/xlogy_cpu_dispatch.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/zero_cpu_dispatch.h>
#include <ATen/ops/zero_native.h>

namespace at {
namespace {
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-function")

// Allocate a fresh CPU output tensor, honoring explicit strides from the meta
// function when provided and otherwise falling back to a contiguous layout.
Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (strides.empty()) {
      return at::detail::empty_cpu(sizes, options);
  } else {
      return at::detail::empty_strided_cpu(sizes, strides, options);
  }
}

void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
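
// For example: if 'out' starts out empty and the meta function requests a
// channels-last result via its memory_format, resize_output above reallocates
// and the memory_format branch restrides 'out' before the kernel writes into
// it.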

void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  //   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  //   2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
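
// For example: an in-place cumsum whose computed dtype (e.g. a float result
// requested on an integer tensor) no longer matches self's storage must be
// rejected by the dtype check above.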

// If 'out' already has the strides requested by the meta function, the kernel
// can write into it directly and no proxy is needed. Otherwise, allocate a
// proxy tensor with the requested strides for the kernel to write into; the
// caller is responsible for copying it back into 'out'.
std::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (out.strides() != strides) {
    return at::detail::empty_strided_cpu(sizes, strides, options);
  }
  return std::nullopt;
}
C10_DIAGNOSTIC_POP()
} // namespace
} // namespace at

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical_out(self, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("conj_physical.out",
         TORCH_FN(wrapper_CPU_out_conj_physical_out));
}
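// Illustrative call path: at::conj_physical_out(out, self) consults the
// dispatcher, which resolves the CPU dispatch key to the
// wrapper_CPU_out_conj_physical_out registration above and forwards to the
// at::native kernel.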
} // anonymous namespace
namespace cpu {
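// These at::cpu:: entry points invoke the CPU kernel directly with static
// dispatch, bypassing the dispatcher; the _out and _outf variants differ only
// in argument order.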
at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_CPU_out_conj_physical_out(self, out);
}
at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_CPU_out_conj_physical_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_ufunc_add_CPU_functional final : public at::native::structured_ufunc_add_CPU {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_add_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  structured_ufunc_add_CPU_functional op;
  op.meta(self, other, alpha);
  op.impl(self, other, alpha, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
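// The functional variant follows the structured-kernel protocol: meta()
// computes the output shape/dtype and calls set_output_* above to allocate
// outputs_[0], then impl() writes the result into that fresh tensor, which is
// moved out to the caller.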
struct structured_ufunc_add_CPU_out final : public at::native::structured_ufunc_add_CPU {
    structured_ufunc_add_CPU_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_add_out_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  structured_ufunc_add_CPU_out op(out);
  op.meta(self, other, alpha);
  op.impl(self, other, alpha, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
struct structured_ufunc_add_CPU_inplace final : public at::native::structured_ufunc_add_CPU {
    structured_ufunc_add_CPU_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_add__Tensor(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  structured_ufunc_add_CPU_inplace op(self);
  op.meta(self, other, alpha);
  op.impl(self, other, alpha, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("add.Tensor", TORCH_FN(wrapper_CPU_add_Tensor));
  m.impl("add.out", TORCH_FN(wrapper_CPU_add_out_out));
  m.impl("add_.Tensor", TORCH_FN(wrapper_CPU_add__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_CPU_add_Tensor(self, other, alpha);
}
at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_CPU_add_out_out(self, other, alpha, out);
}
at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  return wrapper_CPU_add_out_out(self, other, alpha, out);
}
at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_CPU_add__Tensor(self, other, alpha);
}
} // namespace cpu
} // namespace at
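// A hedged usage sketch for the at::cpu::add entry points above (both inputs
// must already be CPU tensors, since these calls bypass the dispatcher):
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({2, 3});
//   at::Tensor c = at::cpu::add(a, b, /*alpha=*/2);  // c = a + 2 * b
//   at::cpu::add_(a, b, /*alpha=*/1);                // a += b, in place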
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_addmv_out_cpu_functional final : public at::native::structured_addmv_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  structured_addmv_out_cpu_functional op;
  op.meta(self, mat, vec, beta, alpha);
  op.impl(self, mat, vec, beta, alpha, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_addmv_out_cpu_out final : public at::native::structured_addmv_out_cpu {
    structured_addmv_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addmv_out_out(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  structured_addmv_out_cpu_out op(out);
  op.meta(self, mat, vec, beta, alpha);
  op.impl(self, mat, vec, beta, alpha, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
struct structured_addmv_out_cpu_inplace final : public at::native::structured_addmv_out_cpu {
    structured_addmv_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  structured_addmv_out_cpu_inplace op(self);
  op.meta(self, mat, vec, beta, alpha);
  op.impl(self, mat, vec, beta, alpha, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("addmv", TORCH_FN(wrapper_CPU_addmv));
  m.impl("addmv.out", TORCH_FN(wrapper_CPU_addmv_out_out));
  m.impl("addmv_", TORCH_FN(wrapper_CPU_addmv_));
}
} // anonymous namespace
namespace cpu {
at::Tensor addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  return wrapper_CPU_addmv(self, mat, vec, beta, alpha);
}
at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  return wrapper_CPU_addmv_out_out(self, mat, vec, beta, alpha, out);
}
at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  return wrapper_CPU_addmv_out_out(self, mat, vec, beta, alpha, out);
}
at::Tensor & addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  return wrapper_CPU_addmv_(self, mat, vec, beta, alpha);
}
} // namespace cpu
} // namespace at
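// A hedged sketch for addmv (matrix-vector product with scaled accumulate,
// out = beta * self + alpha * (mat @ vec)); shapes are illustrative:
//   at::Tensor bias = at::rand({4});
//   at::Tensor mat  = at::rand({4, 5});
//   at::Tensor vec  = at::rand({5});
//   at::Tensor y = at::cpu::addmv(bias, mat, vec, /*beta=*/1, /*alpha=*/1);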
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::addr(self, vec1, vec2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_addr_out(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::addr_out(self, vec1, vec2, beta, alpha, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("addr", TORCH_FN(wrapper_CPU__addr));
  m.impl("addr.out", TORCH_FN(wrapper_CPU_out_addr_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  return wrapper_CPU__addr(self, vec1, vec2, beta, alpha);
}
at::Tensor & addr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  return wrapper_CPU_out_addr_out(self, vec1, vec2, beta, alpha, out);
}
at::Tensor & addr_outf(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  return wrapper_CPU_out_addr_out(self, vec1, vec2, beta, alpha, out);
}
} // namespace cpu
} // namespace at
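// NB: Unlike the structured kernels above, addr has no meta()/impl() split:
// the registered wrappers forward straight to at::native::addr and
// at::native::addr_out, which compute out = beta * self + alpha *
// outer(vec1, vec2).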
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_all_out_functional final : public at::native::structured_all_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_all_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
  structured_all_out_functional op;
  op.meta(self, dim, keepdim);
  op.impl(self, dim, keepdim, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_all_out_out final : public at::native::structured_all_out {
    structured_all_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_all_out_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
  structured_all_out_out op(out);
  op.meta(self, dim, keepdim);
  op.impl(self, dim, keepdim, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("all.dim", TORCH_FN(wrapper_CPU_all_dim));
  m.impl("all.out", TORCH_FN(wrapper_CPU_all_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim) {
  return wrapper_CPU_all_dim(self, dim, keepdim);
}
at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim) {
  return wrapper_CPU_all_out_out(self, dim, keepdim, out);
}
at::Tensor & all_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
  return wrapper_CPU_all_out_out(self, dim, keepdim, out);
}
} // namespace cpu
} // namespace at
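// A hedged sketch for the single-dim reduction registered above:
//   at::Tensor t = at::rand({4, 5}) > 0.5;                        // bool tensor
//   at::Tensor r = at::cpu::all(t, /*dim=*/1, /*keepdim=*/false); // shape {4}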
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_all_dims_out_functional final : public at::native::structured_all_dims_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_all_dims(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
  structured_all_dims_out_functional op;
  op.meta(self, dim, keepdim);
  op.impl(self, dim, keepdim, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_all_dims_out_out final : public at::native::structured_all_dims_out {
    structured_all_dims_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_all_out_dims_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
  structured_all_dims_out_out op(out);
  op.meta(self, dim, keepdim);
  op.impl(self, dim, keepdim, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("all.dims", TORCH_FN(wrapper_CPU_all_dims));
  m.impl("all.dims_out", TORCH_FN(wrapper_CPU_all_out_dims_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor all(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
  return wrapper_CPU_all_dims(self, dim, keepdim);
}
at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
  return wrapper_CPU_all_out_dims_out(self, dim, keepdim, out);
}
at::Tensor & all_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
  return wrapper_CPU_all_out_dims_out(self, dim, keepdim, out);
}
} // namespace cpu
} // namespace at
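// NB: In the all.dims overload, dim is an at::OptionalIntArrayRef: passing
// ::std::nullopt reduces over every dimension (keepdim still controls the
// rank of the result), while a list such as {0, 2} reduces over just those
// dimensions.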
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_argmax_out_functional final : public at::native::structured_argmax_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_argmax(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
  structured_argmax_out_functional op;
  op.meta(self, dim, keepdim);
  op.impl(self, dim, keepdim, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_argmax_out_out final : public at::native::structured_argmax_out {
    structured_argmax_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_argmax_out_out(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
  structured_argmax_out_out op(out);
  op.meta(self, dim, keepdim);
  op.impl(self, dim, keepdim, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("argmax", TORCH_FN(wrapper_CPU_argmax));
  m.impl("argmax.out", TORCH_FN(wrapper_CPU_argmax_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor argmax(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
  return wrapper_CPU_argmax(self, dim, keepdim);
}
at::Tensor & argmax_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
  return wrapper_CPU_argmax_out_out(self, dim, keepdim, out);
}
at::Tensor & argmax_outf(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
  return wrapper_CPU_argmax_out_out(self, dim, keepdim, out);
}
} // namespace cpu
} // namespace at
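// A hedged sketch for argmax: with dim unset the input is treated as
// flattened, and the result is always an index tensor of kLong dtype:
//   at::Tensor t = at::rand({3, 4});
//   at::Tensor flat = at::cpu::argmax(t, ::std::nullopt, /*keepdim=*/false);  // 0-dim
//   at::Tensor rows = at::cpu::argmax(t, /*dim=*/1, /*keepdim=*/false);       // shape {3}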
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::as_strided_tensorimpl(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), storage_offset.has_value() ? ::std::make_optional(storage_offset->guard_int(__FILE__, __LINE__)) : ::std::nullopt);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("as_strided", TORCH_FN(wrapper_CPU__as_strided));
}
} // anonymous namespace
namespace cpu {
at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset) {
  return wrapper_CPU__as_strided(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt);
}
at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
  return wrapper_CPU__as_strided(self, size, stride, storage_offset);
}
} // namespace cpu
} // namespace at
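// NB: Only the SymInt form reaches the kernel: cpu::as_strided packs plain
// int64_t sizes/strides into c10::SymInt via c10::fromIntArrayRefSlow, and the
// wrapper immediately unpacks them with C10_AS_INTARRAYREF_SLOW (guarding any
// symbolic values via guard_int) before calling as_strided_tensorimpl.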
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_atan_out_functional final : public at::native::structured_atan_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_atan(const at::Tensor & self) {
  structured_atan_out_functional op;
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_atan_out_out final : public at::native::structured_atan_out {
    structured_atan_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_atan_out_out(const at::Tensor & self, at::Tensor & out) {
  structured_atan_out_out op(out);
  op.meta(self);
  op.impl(self, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
struct structured_atan_out_inplace final : public at::native::structured_atan_out {
    structured_atan_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_atan_(at::Tensor & self) {
  structured_atan_out_inplace op(self);
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("atan", TORCH_FN(wrapper_CPU_atan));
  m.impl("atan.out", TORCH_FN(wrapper_CPU_atan_out_out));
  m.impl("atan_", TORCH_FN(wrapper_CPU_atan_));
}
} // anonymous namespace
namespace cpu {
at::Tensor atan(const at::Tensor & self) {
  return wrapper_CPU_atan(self);
}
at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_CPU_atan_out_out(self, out);
}
at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_CPU_atan_out_out(self, out);
}
at::Tensor & atan_(at::Tensor & self) {
  return wrapper_CPU_atan_(self);
}
} // namespace cpu
} // namespace at
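// A hedged sketch for the atan entry points (elementwise arctangent):
//   at::Tensor x = at::rand({8});
//   at::Tensor y = at::cpu::atan(x);  // functional
//   at::cpu::atan_(x);                // in place; needs a floating dtype,
//                                     // since the result is floating-point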
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::binary_cross_entropy_cpu(self, target, weight, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_binary_cross_entropy_out(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::binary_cross_entropy_out_cpu(self, target, weight, reduction, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("binary_cross_entropy", TORCH_FN(wrapper_CPU__binary_cross_entropy));
  m.impl("binary_cross_entropy.out", TORCH_FN(wrapper_CPU_out_binary_cross_entropy_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
  return wrapper_CPU__binary_cross_entropy(self, target, weight, reduction);
}
at::Tensor & binary_cross_entropy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
  return wrapper_CPU_out_binary_cross_entropy_out(self, target, weight, reduction, out);
}
at::Tensor & binary_cross_entropy_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
  return wrapper_CPU_out_binary_cross_entropy_out(self, target, weight, reduction, out);
}
} // namespace cpu
} // namespace at
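// A hedged sketch for binary_cross_entropy; `self` holds probabilities in
// [0, 1] (apply a sigmoid first if you have logits), and reduction takes the
// at::Reduction constants:
//   at::Tensor p = at::rand({4});
//   at::Tensor t = at::rand({4}).round();
//   at::Tensor loss = at::cpu::binary_cross_entropy(
//       p, t, /*weight=*/{}, at::Reduction::Mean);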
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_bitwise_not_out_functional final : public at::native::structured_bitwise_not_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_bitwise_not(const at::Tensor & self) {
  structured_bitwise_not_out_functional op;
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_bitwise_not_out_out final : public at::native::structured_bitwise_not_out {
    structured_bitwise_not_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_bitwise_not_out_out(const at::Tensor & self, at::Tensor & out) {
  structured_bitwise_not_out_out op(out);
  op.meta(self);
  op.impl(self, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
struct structured_bitwise_not_out_inplace final : public at::native::structured_bitwise_not_out {
    structured_bitwise_not_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_bitwise_not_(at::Tensor & self) {
  structured_bitwise_not_out_inplace op(self);
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("bitwise_not", TORCH_FN(wrapper_CPU_bitwise_not));
  m.impl("bitwise_not.out", TORCH_FN(wrapper_CPU_bitwise_not_out_out));
  m.impl("bitwise_not_", TORCH_FN(wrapper_CPU_bitwise_not_));
}
} // anonymous namespace
namespace cpu {
at::Tensor bitwise_not(const at::Tensor & self) {
  return wrapper_CPU_bitwise_not(self);
}
at::Tensor & bitwise_not_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_CPU_bitwise_not_out_out(self, out);
}
at::Tensor & bitwise_not_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_CPU_bitwise_not_out_out(self, out);
}
at::Tensor & bitwise_not_(at::Tensor & self) {
  return wrapper_CPU_bitwise_not_(self);
}
} // namespace cpu
} // namespace at
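// A hedged sketch for bitwise_not, which flips every bit of an integral (or
// bool) tensor:
//   at::Tensor i = at::arange(4, at::kInt);
//   at::Tensor n = at::cpu::bitwise_not(i);  // {-1, -2, -3, -4}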
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_logical_xor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logical_xor_out(self, other, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("logical_xor.out", TORCH_FN(wrapper_CPU_out_logical_xor_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
  return wrapper_CPU_out_logical_xor_out(self, other, out);
}
at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return wrapper_CPU_out_logical_xor_out(self, other, out);
}
} // namespace cpu
} // namespace at
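// NB: Only logical_xor.out is registered for CPU in this file; the functional
// and in-place overloads are presumably implemented as composites in terms of
// this out variant and registered elsewhere.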
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_clamp_out_functional final : public at::native::structured_clamp_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  structured_clamp_out_functional op;
  op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
  op.impl(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()), op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_clamp_out_out final : public at::native::structured_clamp_out {
    structured_clamp_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_out_out(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
  structured_clamp_out_out op(out);
  op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
  op.impl(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()), op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return out;
}
struct structured_clamp_out_inplace final : public at::native::structured_clamp_out {
    structured_clamp_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  structured_clamp_out_inplace op(self);
  op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
  op.impl(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()), op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
  return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("clamp", TORCH_FN(wrapper_CPU_clamp));
  m.impl("clamp.out", TORCH_FN(wrapper_CPU_clamp_out_out));
  m.impl("clamp_", TORCH_FN(wrapper_CPU_clamp_));
}
} // anonymous namespace
namespace cpu {
at::Tensor clamp(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  return wrapper_CPU_clamp(self, min, max);
}
at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  return wrapper_CPU_clamp_out_out(self, min, max, out);
}
at::Tensor & clamp_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
  return wrapper_CPU_clamp_out_out(self, min, max, out);
}
at::Tensor & clamp_(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  return wrapper_CPU_clamp_(self, min, max);
}
} // namespace cpu
} // namespace at
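// NB: For the scalar clamp overloads above, min and max are each optional and
// are repacked as at::OptionalScalarRef for the structured kernel; the kernel
// requires at least one bound to be provided. A hedged sketch:
//   at::Tensor t = at::randn({5});
//   at::Tensor c = at::cpu::clamp(t, /*min=*/0.0, /*max=*/1.0);
//   at::Tensor lo = at::cpu::clamp(t, /*min=*/0.0, /*max=*/::std::nullopt);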
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_clamp_Tensor_out_functional final : public at::native::structured_clamp_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_Tensor(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  structured_clamp_Tensor_out_functional op;
  op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
  op.impl(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()), op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_clamp_Tensor_out_out final : public at::native::structured_clamp_Tensor_out {
    structured_clamp_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_out_Tensor_out(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
structured_clamp_Tensor_out_out op(out);
op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
op.impl(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()), op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
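// NOTE [Proxy outputs]
// For the out= wrapper above, maybe_create_proxy can hand back a scratch
// tensor when the user-supplied `out` cannot be written with the strides the
// meta function computed; the kernel then writes into the proxy, and the
// trailing copy_ moves the result back into `out`. Hedged usage sketch
// (hypothetical x/lo/hi as above):
//
//   at::Tensor out = at::empty({4});
//   at::clamp_outf(x, lo, hi, out);  // routed to wrapper_CPU_clamp_out_Tensor_out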
struct structured_clamp_Tensor_out_inplace final : public at::native::structured_clamp_Tensor_out {
    structured_clamp_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp__Tensor(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
structured_clamp_Tensor_out_inplace op(self);
op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
op.impl(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()), op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
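// NOTE [In-place variant]
// The in-place wrapper reuses `self` as output slot 0: check_inplace only
// validates that self already has the sizes/options the meta function
// computed (nothing is resized), and the proxy copy-back applies just as in
// the out= case. Sketch (hypothetical x/lo/hi as above):
//
//   x.clamp_(lo, hi);  // dispatches to wrapper_CPU_clamp__Tensor on CPU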
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("clamp.Tensor", TORCH_FN(wrapper_CPU_clamp_Tensor));
m.impl("clamp.Tensor_out", TORCH_FN(wrapper_CPU_clamp_out_Tensor_out));
m.impl("clamp_.Tensor", TORCH_FN(wrapper_CPU_clamp__Tensor));
}
} // anonymous namespace
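// NOTE [Registration]
// The TORCH_LIBRARY_IMPL block above registers the three wrappers for the
// CPU dispatch key of the aten library; TORCH_FN wraps each function in a
// compile-time function pointer so the dispatcher can register it as an
// unboxed kernel rather than through a runtime function object.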
namespace cpu {
at::Tensor clamp(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
return wrapper_CPU_clamp_Tensor(self, min, max);
}
at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
return wrapper_CPU_clamp_out_Tensor_out(self, min, max, out);
}
at::Tensor & clamp_outf(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
return wrapper_CPU_clamp_out_Tensor_out(self, min, max, out);
}
at::Tensor & clamp_(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
return wrapper_CPU_clamp__Tensor(self, min, max);
}
} // namespace cpu
} // namespace at
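// NOTE [at::cpu:: entry points]
// The at::cpu:: functions above bypass the dispatcher and call the CPU
// wrappers directly. Both C++ calling conventions are provided: *_out takes
// the output first (at::cpu::clamp_out(out, self, min, max)) while *_outf
// takes it last, matching the "clamp.Tensor_out" schema order.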
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
void wrapper_CPU___cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::cummin_helper_cpu(self, values, indices, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_cummin_helper",
TORCH_FN(wrapper_CPU___cummin_helper));
}
} // anonymous namespace
namespace cpu {
void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
return wrapper_CPU___cummin_helper(self, values, indices, dim);
}
} // namespace cpu
} // namespace at
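// NOTE [Unstructured wrappers]
// Ops such as _cummin_helper here, and dot/vdot below, are not structured:
// the generated wrapper forwards directly to the at::native:: implementation.
// The "No device check" / "DeviceGuard omitted" markers record that codegen
// decided the op needs neither cross-argument device validation nor an
// ambient-device switch before the native call. Sketch (hypothetical 1-D
// tensors a and b):
//
//   at::Tensor a = at::randn({8});
//   at::Tensor b = at::randn({8});
//   at::Tensor r = at::dot(a, b);  // lands in wrapper_CPU__dot on CPU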
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__dot(const at::Tensor & self, const at::Tensor & tensor) {
    // No device check
  // DeviceGuard omitted
  return at::native::dot(self, tensor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("dot",
TORCH_FN(wrapper_CPU__dot));
}
} // anonymous namespace
namespace cpu {
at::Tensor dot(const at::Tensor & self, const at::Tensor & tensor) {
return wrapper_CPU__dot(self, tensor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__vdot(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::vdot(self, other);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("vdot",
TORCH_FN(wrapper_CPU__vdot));
}
} // anonymous namespace
namespace cpu {
at::Tensor vdot(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU__vdot(self, other);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_backward_symint(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_embedding_bag_backward",
TORCH_FN(wrapper_CPU___embedding_bag_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
return wrapper_CPU___embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
at::Tensor _embedding_bag_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
return wrapper_CPU___embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
} // namespace cpu
} // namespace at
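// NOTE [SymInt overloads]
// The wrapper above takes num_weights as c10::SymInt, which holds either a
// concrete int64_t or a symbolic value during tracing; at::cpu:: therefore
// exposes both a plain int64_t overload (implicitly converting to SymInt) and
// a *_symint passthrough. Hedged sketch with hypothetical tensors and a
// concrete count:
//
//   at::Tensor gw = at::cpu::_embedding_bag_backward(
//       grad, indices, offsets, offset2bag, bag_size, maximum_indices,
//       /*num_weights=*/1000, /*scale_grad_by_freq=*/false, /*mode=*/0,
//       /*sparse=*/false, /*per_sample_weights=*/std::nullopt,
//       /*padding_idx=*/-1);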
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___empty_affine_quantized(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::empty_affine_quantized_other_backends_stub(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_empty_affine_quantized",
TORCH_FN(wrapper_CPU___empty_affine_quantized));
}
} // anonymous namespace
namespace cpu {
at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_affine_quantized(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor _empty_affine_quantized(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_affine_quantized(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}
at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_affine_quantized(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_affine_quantized(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}
} // namespace cpu
} // namespace at
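// NOTE [TensorOptions overloads]
// Four at::cpu:: spellings above funnel into one wrapper: {IntArrayRef,
// SymIntArrayRef} sizes crossed with {packed TensorOptions, unpacked
// optionals}. c10::fromIntArrayRefSlow lifts concrete sizes into SymInts, and
// check_tensor_options_and_extract_memory_format reconciles a memory format
// passed via options with the explicit argument. The CPU registration here is
// empty_affine_quantized_other_backends_stub, which, as the name suggests,
// exists to give a clear error for dispatch keys that cannot create
// quantized tensors.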
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
struct structured_frac_out_functional final : public at::native::structured_frac_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_frac(const at::Tensor & self) {
structured_frac_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_frac_out_out final : public at::native::structured_frac_out {
    structured_frac_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_frac_out_out(const at::Tensor & self, at::Tensor & out) {
structured_frac_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_frac_out_inplace final : public at::native::structured_frac_out {
    structured_frac_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_frac_(at::Tensor & self) {
structured_frac_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("frac", TORCH_FN(wrapper_CPU_frac));
m.impl("frac.out", TORCH_FN(wrapper_CPU_frac_out_out));
m.impl("frac_", TORCH_FN(wrapper_CPU_frac_));
}
} // anonymous namespace
namespace cpu {
at::Tensor frac(const at::Tensor & self) {
return wrapper_CPU_frac(self);
}
at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_frac_out_out(self, out);
}
at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_frac_out_out(self, out);
}
at::Tensor & frac_(at::Tensor & self) {
return wrapper_CPU_frac_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
struct structured_lcm_out_functional final : public at::native::structured_lcm_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_lcm(const at::Tensor & self, const at::Tensor & other) {
structured_lcm_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_lcm_out_out final : public at::native::structured_lcm_out {
    structured_lcm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_lcm_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_lcm_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_lcm_out_inplace final : public at::native::structured_lcm_out {
    structured_lcm_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_lcm_(at::Tensor & self, const at::Tensor & other) {
structured_lcm_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("lcm", TORCH_FN(wrapper_CPU_lcm));
m.impl("lcm.out", TORCH_FN(wrapper_CPU_lcm_out_out));
m.impl("lcm_", TORCH_FN(wrapper_CPU_lcm_));
}
} // anonymous namespace
namespace cpu {
at::Tensor lcm(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lcm(self, other);
}
at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lcm_out_out(self, other, out);
}
at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_lcm_out_out(self, other, out);
}
at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lcm_(self, other);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU___index_put_impl_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
    // No device check
  // DeviceGuard omitted
  return at::native::_index_put_impl_(self, indices, values, accumulate, unsafe);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_index_put_impl_",
TORCH_FN(wrapper_CPU___index_put_impl_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
return wrapper_CPU___index_put_impl_(self, indices, values, accumulate, unsafe);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_values_kthvalue_out(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    // No device check
  // DeviceGuard omitted
  return at::native::kthvalue_out_cpu(self, k, dim, keepdim, values, indices);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("kthvalue.values",
TORCH_FN(wrapper_CPU_values_kthvalue_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
return wrapper_CPU_values_kthvalue_out(self, k, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
return wrapper_CPU_values_kthvalue_out(self, k, dim, keepdim, values, indices);
}
} // namespace cpu
} // namespace at
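// NOTE [Multi-output out= ops]
// kthvalue.values writes two results; the wrapper returns a tuple of the two
// references it was given. Sketch (hypothetical input x; the native kernel
// resizes the outputs as needed):
//
//   at::Tensor v = at::empty({0});
//   at::Tensor i = at::empty({0}, at::kLong);
//   at::cpu::kthvalue_outf(x, /*k=*/2, /*dim=*/0, /*keepdim=*/false, v, i);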
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps) {
    // No device check
  // DeviceGuard omitted
  return at::native::layer_norm_cpu(input, C10_AS_INTARRAYREF_SLOW(normalized_shape), weight, bias, eps);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("native_layer_norm",
TORCH_FN(wrapper_CPU__native_layer_norm));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps) {
return wrapper_CPU__native_layer_norm(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps) {
return wrapper_CPU__native_layer_norm(input, normalized_shape, weight, bias, eps);
}
} // namespace cpu
} // namespace at
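// NOTE [C10_AS_INTARRAYREF_SLOW]
// layer_norm_cpu wants concrete integer shapes, so the wrapper converts the
// SymIntArrayRef with C10_AS_INTARRAYREF_SLOW, which materializes the values
// (and fails if any entry is still symbolic). Sketch with a hypothetical
// input x of shape {N, 512}:
//
//   auto [y, mean, rstd] = at::cpu::native_layer_norm(
//       x, /*normalized_shape=*/{512}, /*weight=*/std::nullopt,
//       /*bias=*/std::nullopt, /*eps=*/1e-5);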
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
    // No device check
  // DeviceGuard omitted
  return at::native::layer_norm_backward_cpu(grad_out, input, C10_AS_INTARRAYREF_SLOW(normalized_shape), mean, rstd, weight, bias, output_mask);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("native_layer_norm_backward",
TORCH_FN(wrapper_CPU__native_layer_norm_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
return wrapper_CPU__native_layer_norm_backward(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
return wrapper_CPU__native_layer_norm_backward(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
struct structured_log10_out_functional final : public at::native::structured_log10_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_log10(const at::Tensor & self) {
structured_log10_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_log10_out_out final : public at::native::structured_log10_out {
    structured_log10_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log10_out_out(const at::Tensor & self, at::Tensor & out) {
structured_log10_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_log10_out_inplace final : public at::native::structured_log10_out {
    structured_log10_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log10_(at::Tensor & self) {
structured_log10_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("log10", TORCH_FN(wrapper_CPU_log10));
m.impl("log10.out", TORCH_FN(wrapper_CPU_log10_out_out));
m.impl("log10_", TORCH_FN(wrapper_CPU_log10_));
}
} // anonymous namespace
namespace cpu {
at::Tensor log10(const at::Tensor & self) {
return wrapper_CPU_log10(self);
}
at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_log10_out_out(self, out);
}
at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_log10_out_out(self, out);
}
at::Tensor & log10_(at::Tensor & self) {
return wrapper_CPU_log10_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
struct structured_log1p_out_functional final : public at::native::structured_log1p_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_log1p(const at::Tensor & self) {
structured_log1p_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_log1p_out_out final : public at::native::structured_log1p_out {
    structured_log1p_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log1p_out_out(const at::Tensor & self, at::Tensor & out) {
structured_log1p_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_log1p_out_inplace final : public at::native::structured_log1p_out {
    structured_log1p_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log1p_(at::Tensor & self) {
structured_log1p_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("log1p", TORCH_FN(wrapper_CPU_log1p));
m.impl("log1p.out", TORCH_FN(wrapper_CPU_log1p_out_out));
m.impl("log1p_", TORCH_FN(wrapper_CPU_log1p_));
}
} // anonymous namespace
namespace cpu {
at::Tensor log1p(const at::Tensor & self) {
return wrapper_CPU_log1p(self);
}
at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_log1p_out_out(self, out);
}
at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_log1p_out_out(self, out);
}
at::Tensor & log1p_(at::Tensor & self) {
return wrapper_CPU_log1p_(self);
}
} // namespace cpu
} // namespace at
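// NOTE [log1p accuracy]
// log1p(x) computes log(1 + x) in a way that stays accurate when x is tiny,
// where forming 1 + x first would round away the low-order bits. Sketch
// (hypothetical tensor, default float dtype):
//
//   at::Tensor tiny  = at::full({1}, 1e-12);
//   at::Tensor good  = at::cpu::log1p(tiny);   // ~= 1e-12
//   at::Tensor naive = (tiny + 1.0).log();     // 1 + 1e-12 rounds to 1, so 0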
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__mkldnn_rnn_layer(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
    // No device check
  // DeviceGuard omitted
  return at::native::mkldnn_rnn_layer(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mkldnn_rnn_layer",
TORCH_FN(wrapper_CPU__mkldnn_rnn_layer));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
return wrapper_CPU__mkldnn_rnn_layer(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
struct structured_mm_out_cpu_functional final : public at::native::structured_mm_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output (no base-class hook to chain to for this op)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output (no base-class hook to chain to for this op)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_mm(const at::Tensor & self, const at::Tensor & mat2) {
structured_mm_out_cpu_functional op;
op.meta(self, mat2);
op.impl(self, mat2, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_mm_out_cpu_out final : public at::native::structured_mm_out_cpu {
    structured_mm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output (no base-class hook to chain to for this op)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output (no base-class hook to chain to for this op)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_mm_out_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
structured_mm_out_cpu_out op(out);
op.meta(self, mat2);
op.impl(self, mat2, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mm", TORCH_FN(wrapper_CPU_mm));
m.impl("mm.out", TORCH_FN(wrapper_CPU_mm_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU_mm(self, mat2);
}
at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU_mm_out_out(self, mat2, out);
}
at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_CPU_mm_out_out(self, mat2, out);
}
} // namespace cpu
} // namespace at
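// NOTE [mm set_output hooks]
// Unlike the TensorIterator-backed ops above (clamp, frac, log10, ...), the
// mm hooks do not chain into a base-class set_output_raw_strided, presumably
// because mm's meta class is not iterator-based and so has no hook that needs
// to observe the output. Usage sketch (hypothetical tensors):
//
//   at::Tensor a = at::randn({2, 3});
//   at::Tensor b = at::randn({3, 4});
//   at::Tensor c = at::cpu::mm(a, b);   // fresh {2, 4} output
//   at::Tensor o = at::empty({2, 4});
//   at::cpu::mm_out(o, a, b);           // writes into o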
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___dyn_quant_matmul_4bit(const at::Tensor & inp, const at::Tensor & packed_weights, int64_t block_size, int64_t in_features, int64_t out_features) {
    // No device check
  // DeviceGuard omitted
  return at::native::_dyn_quant_matmul_4bit_cpu(inp, packed_weights, block_size, in_features, out_features);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_dyn_quant_matmul_4bit",
TORCH_FN(wrapper_CPU___dyn_quant_matmul_4bit));
}
} // anonymous namespace
namespace cpu {
at::Tensor _dyn_quant_matmul_4bit(const at::Tensor & inp, const at::Tensor & packed_weights, int64_t block_size, int64_t in_features, int64_t out_features) {
return wrapper_CPU___dyn_quant_matmul_4bit(inp, packed_weights, block_size, in_features, out_features);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have already been
// defined in the at namespace.
namespace {
struct structured_mul_out_functional final : public at::native::structured_mul_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_mul_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_mul_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_mul_out_out final : public at::native::structured_mul_out {
    structured_mul_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_mul_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_mul_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_mul_out_inplace final : public at::native::structured_mul_out {
    structured_mul_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_mul__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_mul_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mul.Tensor", TORCH_FN(wrapper_CPU_mul_Tensor));
m.impl("mul.out", TORCH_FN(wrapper_CPU_mul_out_out));
m.impl("mul_.Tensor", TORCH_FN(wrapper_CPU_mul__Tensor));
}
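// NOTE: TORCH_LIBRARY_IMPL registers the wrappers above with the dispatcher
// under the CPU dispatch key, so a call such as at::mul(a, b) on CPU tensors
// reaches wrapper_CPU_mul_Tensor once autograd and the other dispatch keys
// have had their turn.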
} // anonymous namespace
namespace cpu {
at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_mul_Tensor(self, other);
}
at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_mul_out_out(self, other, out);
}
at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_mul_out_out(self, other, out);
}
at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_mul__Tensor(self, other);
}
} // namespace cpu
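// NOTE: the at::cpu:: entry points call the CPU wrappers directly, bypassing
// dispatch. An illustrative caller -- a sketch, not part of this generated
// file -- assuming <ATen/ATen.h> and <ATen/ops/mul_cpu_dispatch.h>:
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({2, 3});
//   at::Tensor c = at::cpu::mul(a, b);   // functional variant
//   at::Tensor out = at::empty({2, 3});
//   at::cpu::mul_out(out, a, b);         // out= variant, output first
//   at::cpu::mul_outf(a, b, out);        // "outf" variant, output last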
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_mvlgamma_out(const at::Tensor & self, int64_t p, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::mvlgamma_out(self, p, out);
}
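// NOTE: mvlgamma.out is not a structured kernel, so the wrapper above is a
// thin shim with no meta()/impl() split; it forwards straight to the
// at::native implementation.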
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mvlgamma.out",
TORCH_FN(wrapper_CPU_out_mvlgamma_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & mvlgamma_out(at::Tensor & out, const at::Tensor & self, int64_t p) {
return wrapper_CPU_out_mvlgamma_out(self, p, out);
}
at::Tensor & mvlgamma_outf(const at::Tensor & self, int64_t p, at::Tensor & out) {
return wrapper_CPU_out_mvlgamma_out(self, p, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // No device check
  // DeviceGuard omitted
  return at::native::_reshape_alias(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride));
}
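// NOTE: the registered wrapper takes c10::SymIntArrayRef so symbolic shapes
// can flow through the dispatcher; C10_AS_INTARRAYREF_SLOW checks that the
// values are concrete and converts them to IntArrayRef for the kernel, while
// the at::cpu:: overload below uses c10::fromIntArrayRefSlow for the reverse
// direction.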
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_reshape_alias",
TORCH_FN(wrapper_CPU___reshape_alias));
}
} // anonymous namespace
namespace cpu {
at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_CPU___reshape_alias(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
}
at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_CPU___reshape_alias(self, size, stride);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_round_out_functional final : public at::native::structured_round_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
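// NOTE: the three generated variants of a structured op differ only in output
// policy: *_functional allocates fresh storage via create_out(), *_out
// resizes and validates the caller's tensor via resize_out(), and *_inplace
// validates self via check_inplace(). The meta()/impl() pair is shared.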
at::Tensor wrapper_CPU_round(const at::Tensor & self) {
structured_round_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_round_out_out final : public at::native::structured_round_out {
    structured_round_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_round_out_out(const at::Tensor & self, at::Tensor & out) {
structured_round_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_round_out_inplace final : public at::native::structured_round_out {
    structured_round_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_round_(at::Tensor & self) {
structured_round_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("round", TORCH_FN(wrapper_CPU_round));
m.impl("round.out", TORCH_FN(wrapper_CPU_round_out_out));
m.impl("round_", TORCH_FN(wrapper_CPU_round_));
}
} // anonymous namespace
namespace cpu {
at::Tensor round(const at::Tensor & self) {
return wrapper_CPU_round(self);
}
at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_round_out_out(self, out);
}
at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_round_out_out(self, out);
}
at::Tensor & round_(at::Tensor & self) {
return wrapper_CPU_round_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_round_decimals_out_functional final : public at::native::structured_round_decimals_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_round_decimals(const at::Tensor & self, int64_t decimals) {
structured_round_decimals_out_functional op;
op.meta(self, decimals);
op.impl(self, decimals, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_round_decimals_out_out final : public at::native::structured_round_decimals_out {
    structured_round_decimals_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_round_out_decimals_out(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
structured_round_decimals_out_out op(out);
op.meta(self, decimals);
op.impl(self, decimals, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_round_decimals_out_inplace final : public at::native::structured_round_decimals_out {
    structured_round_decimals_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_round__decimals(at::Tensor & self, int64_t decimals) {
structured_round_decimals_out_inplace op(self);
op.meta(self, decimals);
op.impl(self, decimals, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("round.decimals", TORCH_FN(wrapper_CPU_round_decimals));
m.impl("round.decimals_out", TORCH_FN(wrapper_CPU_round_out_decimals_out));
m.impl("round_.decimals", TORCH_FN(wrapper_CPU_round__decimals));
}
} // anonymous namespace
namespace cpu {
at::Tensor round(const at::Tensor & self, int64_t decimals) {
return wrapper_CPU_round_decimals(self, decimals);
}
at::Tensor & round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals) {
return wrapper_CPU_round_out_decimals_out(self, decimals, out);
}
at::Tensor & round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
return wrapper_CPU_round_out_decimals_out(self, decimals, out);
}
at::Tensor & round_(at::Tensor & self, int64_t decimals) {
return wrapper_CPU_round__decimals(self, decimals);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
    // No device check
  // DeviceGuard omitted
  return at::native::_prelu_kernel_backward(grad_output, self, weight);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_prelu_kernel_backward",
TORCH_FN(wrapper_CPU___prelu_kernel_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
return wrapper_CPU___prelu_kernel_backward(grad_output, self, weight);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_rsqrt_out_functional final : public at::native::structured_rsqrt_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_rsqrt(const at::Tensor & self) {
structured_rsqrt_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_rsqrt_out_out final : public at::native::structured_rsqrt_out {
    structured_rsqrt_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_rsqrt_out_out(const at::Tensor & self, at::Tensor & out) {
structured_rsqrt_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_rsqrt_out_inplace final : public at::native::structured_rsqrt_out {
    structured_rsqrt_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_rsqrt_(at::Tensor & self) {
structured_rsqrt_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("rsqrt", TORCH_FN(wrapper_CPU_rsqrt));
m.impl("rsqrt.out", TORCH_FN(wrapper_CPU_rsqrt_out_out));
m.impl("rsqrt_", TORCH_FN(wrapper_CPU_rsqrt_));
}
} // anonymous namespace
namespace cpu {
at::Tensor rsqrt(const at::Tensor & self) {
return wrapper_CPU_rsqrt(self);
}
at::Tensor & rsqrt_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_rsqrt_out_out(self, out);
}
at::Tensor & rsqrt_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_rsqrt_out_out(self, out);
}
at::Tensor & rsqrt_(at::Tensor & self) {
return wrapper_CPU_rsqrt_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_sigmoid_out_functional final : public at::native::structured_sigmoid_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_sigmoid(const at::Tensor & self) {
structured_sigmoid_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_sigmoid_out_out final : public at::native::structured_sigmoid_out {
    structured_sigmoid_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sigmoid_out_out(const at::Tensor & self, at::Tensor & out) {
structured_sigmoid_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_sigmoid_out_inplace final : public at::native::structured_sigmoid_out {
    structured_sigmoid_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sigmoid_(at::Tensor & self) {
structured_sigmoid_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("sigmoid", TORCH_FN(wrapper_CPU_sigmoid));
m.impl("sigmoid.out", TORCH_FN(wrapper_CPU_sigmoid_out_out));
m.impl("sigmoid_", TORCH_FN(wrapper_CPU_sigmoid_));
}
} // anonymous namespace
namespace cpu {
at::Tensor sigmoid(const at::Tensor & self) {
return wrapper_CPU_sigmoid(self);
}
at::Tensor & sigmoid_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_sigmoid_out_out(self, out);
}
at::Tensor & sigmoid_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_sigmoid_out_out(self, out);
}
at::Tensor & sigmoid_(at::Tensor & self) {
return wrapper_CPU_sigmoid_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_sinc_out_functional final : public at::native::structured_sinc_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_sinc(const at::Tensor & self) {
structured_sinc_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_sinc_out_out final : public at::native::structured_sinc_out {
    structured_sinc_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sinc_out_out(const at::Tensor & self, at::Tensor & out) {
structured_sinc_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_sinc_out_inplace final : public at::native::structured_sinc_out {
    structured_sinc_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sinc_(at::Tensor & self) {
structured_sinc_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("sinc", TORCH_FN(wrapper_CPU_sinc));
m.impl("sinc.out", TORCH_FN(wrapper_CPU_sinc_out_out));
m.impl("sinc_", TORCH_FN(wrapper_CPU_sinc_));
}
} // anonymous namespace
namespace cpu {
at::Tensor sinc(const at::Tensor & self) {
return wrapper_CPU_sinc(self);
}
at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_sinc_out_out(self, out);
}
at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_sinc_out_out(self, out);
}
at::Tensor & sinc_(at::Tensor & self) {
return wrapper_CPU_sinc_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_sinh_out_functional final : public at::native::structured_sinh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_sinh(const at::Tensor & self) {
structured_sinh_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_sinh_out_out final : public at::native::structured_sinh_out {
    structured_sinh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sinh_out_out(const at::Tensor & self, at::Tensor & out) {
structured_sinh_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_sinh_out_inplace final : public at::native::structured_sinh_out {
    structured_sinh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sinh_(at::Tensor & self) {
structured_sinh_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("sinh", TORCH_FN(wrapper_CPU_sinh));
m.impl("sinh.out", TORCH_FN(wrapper_CPU_sinh_out_out));
m.impl("sinh_", TORCH_FN(wrapper_CPU_sinh_));
}
} // anonymous namespace
namespace cpu {
at::Tensor sinh(const at::Tensor & self) {
return wrapper_CPU_sinh(self);
}
at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_sinh_out_out(self, out);
}
at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_sinh_out_out(self, out);
}
at::Tensor & sinh_(at::Tensor & self) {
return wrapper_CPU_sinh_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_correction_std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    // No device check
  // DeviceGuard omitted
  return at::native::std_mean(self, dim, correction, keepdim);
}
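// NOTE: `correction` is the delta-degrees-of-freedom adjustment: the variance
// divisor is N - correction, and an unset optional is treated as
// correction = 1 (Bessel's correction), matching torch.std_mean.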
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("std_mean.correction",
TORCH_FN(wrapper_CPU_correction_std_mean));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
return wrapper_CPU_correction_std_mean(self, dim, correction, keepdim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__flip(const at::Tensor & self, at::IntArrayRef dims) {
    // No device check
  // DeviceGuard omitted
  return at::native::flip(self, dims);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("flip",
TORCH_FN(wrapper_CPU__flip));
}
} // anonymous namespace
namespace cpu {
at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) {
return wrapper_CPU__flip(self, dims);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
    // No device check
  // DeviceGuard omitted
  return at::native::NestedTensor_nested_tensor_from_mask(t, mask, mask_check);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_nested_tensor_from_mask",
TORCH_FN(wrapper_CPU___nested_tensor_from_mask));
}
} // anonymous namespace
namespace cpu {
at::Tensor _nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
return wrapper_CPU___nested_tensor_from_mask(t, mask, mask_check);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
bool wrapper_CPU___nested_tensor_from_mask_left_aligned(const at::Tensor & t, const at::Tensor & mask) {
    // No device check
  // DeviceGuard omitted
  return at::native::NestedTensor_nested_tensor_from_mask_left_aligned(t, mask);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_nested_tensor_from_mask_left_aligned",
TORCH_FN(wrapper_CPU___nested_tensor_from_mask_left_aligned));
}
} // anonymous namespace
namespace cpu {
bool _nested_tensor_from_mask_left_aligned(const at::Tensor & t, const at::Tensor & mask) {
return wrapper_CPU___nested_tensor_from_mask_left_aligned(t, mask);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___nested_compute_contiguous_strides_offsets(const at::Tensor & nested_size) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nested_compute_contiguous_strides_offsets(nested_size);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_nested_compute_contiguous_strides_offsets",
TORCH_FN(wrapper_CPU___nested_compute_contiguous_strides_offsets));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _nested_compute_contiguous_strides_offsets(const at::Tensor & nested_size) {
return wrapper_CPU___nested_compute_contiguous_strides_offsets(nested_size);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__unique_dim(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
    // No device check
  // DeviceGuard omitted
  return at::native::unique_dim_cpu(self, dim, sorted, return_inverse, return_counts);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("unique_dim",
TORCH_FN(wrapper_CPU__unique_dim));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
return wrapper_CPU__unique_dim(self, dim, sorted, return_inverse, return_counts);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_correction_var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    // No device check
  // DeviceGuard omitted
  return at::native::var_mean(self, dim, correction, keepdim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("var_mean.correction",
TORCH_FN(wrapper_CPU_correction_var_mean));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
return wrapper_CPU_correction_var_mean(self, dim, correction, keepdim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout) {
    // No device check
  // DeviceGuard omitted
  return at::native::spdiags(diagonals, offsets, shape, layout);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_spdiags",
TORCH_FN(wrapper_CPU___spdiags));
}
} // anonymous namespace
namespace cpu {
at::Tensor _spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout) {
return wrapper_CPU___spdiags(diagonals, offsets, shape, layout);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor_rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // No device check
  // DeviceGuard omitted
  return at::native::rsub(self, other, alpha);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("rsub.Tensor",
TORCH_FN(wrapper_CPU_Tensor_rsub));
}
} // anonymous namespace
namespace cpu {
at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CPU_Tensor_rsub(self, other, alpha);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___to_sparse_csr(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse_csr(self, dense_dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_to_sparse_csr",
TORCH_FN(wrapper_CPU___to_sparse_csr));
}
} // anonymous namespace
namespace cpu {
at::Tensor _to_sparse_csr(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
return wrapper_CPU___to_sparse_csr(self, dense_dim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse_csc(self, dense_dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_to_sparse_csc",
TORCH_FN(wrapper_CPU___to_sparse_csc));
}
} // anonymous namespace
namespace cpu {
at::Tensor _to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
return wrapper_CPU___to_sparse_csc(self, dense_dim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_self_dequantize(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::dequantize_cpu_or_cuda(self);
}
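// NOTE: dequantize.self converts a quantized tensor back to a floating-point
// tensor; the dequantize_cpu_or_cuda kernel is shared between the CPU and
// CUDA registrations.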
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("dequantize.self",
TORCH_FN(wrapper_CPU_self_dequantize));
}
} // anonymous namespace
namespace cpu {
at::Tensor dequantize(const at::Tensor & self) {
return wrapper_CPU_self_dequantize(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___fake_quantize_learnable_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fake_quantize_learnable_per_channel_affine",
TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_channel_affine));
}
} // anonymous namespace
namespace cpu {
at::Tensor _fake_quantize_learnable_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
return wrapper_CPU___fake_quantize_learnable_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___fused_moving_avg_obs_fq_helper(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
  // No device check
  // DeviceGuard omitted
  return at::native::fused_moving_avg_obs_fake_quant_cpu(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fused_moving_avg_obs_fq_helper",
TORCH_FN(wrapper_CPU___fused_moving_avg_obs_fq_helper));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
return wrapper_CPU___fused_moving_avg_obs_fq_helper(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Scalar wrapper_CPU___local_scalar_dense(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_local_scalar_dense_cpu(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_local_scalar_dense",
TORCH_FN(wrapper_CPU___local_scalar_dense));
}
} // anonymous namespace
namespace cpu {
at::Scalar _local_scalar_dense(const at::Tensor & self) {
return wrapper_CPU___local_scalar_dense(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
bool wrapper_CPU__is_set_to(const at::Tensor & self, const at::Tensor & tensor) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_set_to(self, tensor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("is_set_to",
TORCH_FN(wrapper_CPU__is_set_to));
}
} // anonymous namespace
namespace cpu {
bool is_set_to(const at::Tensor & self, const at::Tensor & tensor) {
return wrapper_CPU__is_set_to(self, tensor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
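// The structured wrappers below are generated in three flavors from one
// meta/impl pair:
//   * ..._functional: allocates fresh outputs via create_out;
//   * ..._out: writes into caller-provided tensors, resizing them as needed;
//     when a provided tensor cannot be written directly, maybe_create_proxy
//     supplies a temporary that is copied back after impl runs;
//   * ..._inplace: verifies via check_inplace that self can hold the result
//     and writes into it.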
struct structured_scatter_reduce_two_functional final : public at::native::structured_scatter_reduce_two {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_scatter_reduce_two(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
structured_scatter_reduce_two_functional op;
op.meta(self, dim, index, src, reduce, include_self);
op.impl(self, dim, index, src, reduce, include_self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_scatter_reduce_two_out final : public at::native::structured_scatter_reduce_two {
    structured_scatter_reduce_two_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_reduce_out_two_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
structured_scatter_reduce_two_out op(out);
op.meta(self, dim, index, src, reduce, include_self);
op.impl(self, dim, index, src, reduce, include_self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_scatter_reduce_two_inplace final : public at::native::structured_scatter_reduce_two {
    structured_scatter_reduce_two_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_reduce__two(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
structured_scatter_reduce_two_inplace op(self);
op.meta(self, dim, index, src, reduce, include_self);
op.impl(self, dim, index, src, reduce, include_self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("scatter_reduce.two", TORCH_FN(wrapper_CPU_scatter_reduce_two));
m.impl("scatter_reduce.two_out", TORCH_FN(wrapper_CPU_scatter_reduce_out_two_out));
m.impl("scatter_reduce_.two", TORCH_FN(wrapper_CPU_scatter_reduce__two));
}
} // anonymous namespace
namespace cpu {
at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
return wrapper_CPU_scatter_reduce_two(self, dim, index, src, reduce, include_self);
}
at::Tensor & scatter_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
return wrapper_CPU_scatter_reduce_out_two_out(self, dim, index, src, reduce, include_self, out);
}
at::Tensor & scatter_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
return wrapper_CPU_scatter_reduce_out_two_out(self, dim, index, src, reduce, include_self, out);
}
at::Tensor & scatter_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
return wrapper_CPU_scatter_reduce__two(self, dim, index, src, reduce, include_self);
}
} // namespace cpu
} // namespace at
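// Illustrative use of the direct scatter_reduce entry points above
// (hypothetical tensors; illustration only):
//
//   at::Tensor self  = at::zeros({4});
//   at::Tensor index = at::arange(4);   // int64 indices 0..3
//   at::Tensor src   = at::ones({4});
//   at::Tensor res   = at::cpu::scatter_reduce(
//       self, /*dim=*/0, index, src, /*reduce=*/"sum", /*include_self=*/true);
//
// scatter_reduce_out (out-first) and scatter_reduce_outf (out-last) bind the
// same out-variant kernel and differ only in argument order; this out/outf
// pairing repeats for every op with an out overload in this file.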
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
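// Unlike the scatter_reduce wrappers above, the structured classes below end
// each set_output_* override with a call to
// at::native::structured_bitwise_xor_out::set_output_raw_strided (the
// "super" call their inline comments refer to), so the base meta class also
// sees the finalized output tensor.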
struct structured_bitwise_xor_out_functional final : public at::native::structured_bitwise_xor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_bitwise_xor_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_bitwise_xor_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_bitwise_xor_out_out final : public at::native::structured_bitwise_xor_out {
    structured_bitwise_xor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_bitwise_xor_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_bitwise_xor_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_bitwise_xor_out_inplace final : public at::native::structured_bitwise_xor_out {
    structured_bitwise_xor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_bitwise_xor__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_bitwise_xor_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bitwise_xor.Tensor", TORCH_FN(wrapper_CPU_bitwise_xor_Tensor));
m.impl("bitwise_xor.Tensor_out", TORCH_FN(wrapper_CPU_bitwise_xor_out_Tensor_out));
m.impl("bitwise_xor_.Tensor", TORCH_FN(wrapper_CPU_bitwise_xor__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_bitwise_xor_Tensor(self, other);
}
at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_bitwise_xor_out_Tensor_out(self, other, out);
}
at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_bitwise_xor_out_Tensor_out(self, other, out);
}
at::Tensor & bitwise_xor_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_bitwise_xor__Tensor(self, other);
}
} // namespace cpu
} // namespace at
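// Illustrative use of the entry points above (hypothetical integer tensors a
// and b): at::cpu::bitwise_xor_(a, b) xors b into a in place, while
// at::cpu::bitwise_xor(a, b) allocates and returns a fresh result.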
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::addbmm(self, batch1, batch2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_addbmm_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::addbmm_out(self, batch1, batch2, beta, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::addbmm_(self, batch1, batch2, beta, alpha);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("addbmm",
TORCH_FN(wrapper_CPU__addbmm));
m.impl("addbmm.out",
TORCH_FN(wrapper_CPU_out_addbmm_out));
m.impl("addbmm_",
TORCH_FN(wrapper_CPU__addbmm_));
}
} // anonymous namespace
namespace cpu {
at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU__addbmm(self, batch1, batch2, beta, alpha);
}
at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_out_addbmm_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_out_addbmm_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU__addbmm_(self, batch1, batch2, beta, alpha);
}
} // namespace cpu
} // namespace at
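// For reference, addbmm computes
//   beta * self + alpha * sum_i(batch1[i] @ batch2[i]),
// a batched matrix multiply reduced over the batch dimension and accumulated
// into self.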
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_from_random_(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::random_(self, from, to, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("random_.from",
TORCH_FN(wrapper_CPU_from_random_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & random_(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
return wrapper_CPU_from_random_(self, from, to, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_to_random_(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::random_(self, to, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("random_.to",
TORCH_FN(wrapper_CPU_to_random_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & random_(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
return wrapper_CPU_to_random_(self, to, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__random_(at::Tensor & self, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::random_(self, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("random_",
TORCH_FN(wrapper_CPU__random_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & random_(at::Tensor & self, ::std::optional<at::Generator> generator) {
return wrapper_CPU__random_(self, generator);
}
} // namespace cpu
} // namespace at
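// The three preceding sections cover the three schema overloads of random_.
// Illustrative calls through the direct entry points (hypothetical tensor t):
//
//   at::cpu::random_(t, /*from=*/0, /*to=*/10, std::nullopt);  // random_.from
//   at::cpu::random_(t, /*to=*/10, std::nullopt);              // random_.to
//   at::cpu::random_(t, std::nullopt);                         // random_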
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__exponential_(at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::exponential_(self, lambd, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("exponential_",
TORCH_FN(wrapper_CPU__exponential_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & exponential_(at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
return wrapper_CPU__exponential_(self, lambd, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__geometric_(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::geometric_(self, p, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("geometric_",
TORCH_FN(wrapper_CPU__geometric_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
return wrapper_CPU__geometric_(self, p, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__trace(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::trace_cpu(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("trace",
TORCH_FN(wrapper_CPU__trace));
}
} // anonymous namespace
namespace cpu {
at::Tensor trace(const at::Tensor & self) {
return wrapper_CPU__trace(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_select_cpu_(self, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_index_select_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_select_out_cpu_(self, dim, index, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("index_select",
TORCH_FN(wrapper_CPU__index_select));
m.impl("index_select.out",
TORCH_FN(wrapper_CPU_out_index_select_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
return wrapper_CPU__index_select(self, dim, index);
}
at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) {
return wrapper_CPU_out_index_select_out(self, dim, index, out);
}
at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
return wrapper_CPU_out_index_select_out(self, dim, index, out);
}
} // namespace cpu
} // namespace at
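// Illustrative use (hypothetical tensors): select rows 0 and 2 of a matrix,
//
//   at::Tensor x = at::rand({3, 5});
//   at::Tensor idx = at::arange(0, 3, 2);  // int64 indices {0, 2}
//   at::Tensor sel = at::cpu::index_select(x, /*dim=*/0, idx);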
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__masked_select(const at::Tensor & self, const at::Tensor & mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_select_cpu(self, mask);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_masked_select_out(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_select_out_cpu(self, mask, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("masked_select",
TORCH_FN(wrapper_CPU__masked_select));
m.impl("masked_select.out",
TORCH_FN(wrapper_CPU_out_masked_select_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask) {
return wrapper_CPU__masked_select(self, mask);
}
at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) {
return wrapper_CPU_out_masked_select_out(self, mask, out);
}
at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
return wrapper_CPU_out_masked_select_out(self, mask, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_solve_triangular(self, B, upper, left, unitriangular);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_linalg_solve_triangular_out(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_solve_triangular_out(self, B, upper, left, unitriangular, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_solve_triangular",
TORCH_FN(wrapper_CPU__linalg_solve_triangular));
m.impl("linalg_solve_triangular.out",
TORCH_FN(wrapper_CPU_out_linalg_solve_triangular_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
return wrapper_CPU__linalg_solve_triangular(self, B, upper, left, unitriangular);
}
at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
return wrapper_CPU_out_linalg_solve_triangular_out(self, B, upper, left, unitriangular, out);
}
at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
return wrapper_CPU_out_linalg_solve_triangular_out(self, B, upper, left, unitriangular, out);
}
} // namespace cpu
} // namespace at
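// For reference, linalg_solve_triangular solves A X = B for X when
// left=true and X A = B when left=false, where A (self here) is assumed
// triangular (upper selects which half is read); unitriangular=true
// additionally assumes ones on the diagonal.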
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::multinomial(self, num_samples, replacement, generator);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_multinomial_out(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::multinomial_out(self, num_samples, replacement, generator, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("multinomial",
TORCH_FN(wrapper_CPU__multinomial));
m.impl("multinomial.out",
TORCH_FN(wrapper_CPU_out_multinomial_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator) {
return wrapper_CPU__multinomial(self, num_samples, replacement, generator);
}
at::Tensor & multinomial_out(at::Tensor & out, const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator) {
return wrapper_CPU_out_multinomial_out(self, num_samples, replacement, generator, out);
}
at::Tensor & multinomial_outf(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_out_multinomial_out(self, num_samples, replacement, generator, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_bins_tensor_histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density) {
  // No device check
  // DeviceGuard omitted
  return at::native::histogram(self, bins, weight, density);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_bins_tensor_out_histogram_out(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
  // No device check
  // DeviceGuard omitted
  return at::native::histogram_out(self, bins, weight, density, hist, bin_edges);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("histogram.bins_tensor",
TORCH_FN(wrapper_CPU_bins_tensor_histogram));
m.impl("histogram.bins_tensor_out",
TORCH_FN(wrapper_CPU_bins_tensor_out_histogram_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bins_tensor_histogram(self, bins, weight, density);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bins_tensor_out_histogram_out(self, bins, weight, density, hist, bin_edges);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
return wrapper_CPU_bins_tensor_out_histogram_out(self, bins, weight, density, hist, bin_edges);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_bin_ct_histogram(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
  // No device check
  // DeviceGuard omitted
  return at::native::histogram(self, bins, range, weight, density);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_bin_ct_out_histogram_out(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
  // No device check
  // DeviceGuard omitted
  return at::native::histogram_out(self, bins, range, weight, density, hist, bin_edges);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("histogram.bin_ct",
TORCH_FN(wrapper_CPU_bin_ct_histogram));
m.impl("histogram.bin_ct_out",
TORCH_FN(wrapper_CPU_bin_ct_out_histogram_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bin_ct_histogram(self, bins, range, weight, density);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bin_ct_out_histogram_out(self, bins, range, weight, density, hist, bin_edges);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
return wrapper_CPU_bin_ct_out_histogram_out(self, bins, range, weight, density, hist, bin_edges);
}
} // namespace cpu
} // namespace at
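// Both histogram overloads return a (hist, bin_edges) pair. Illustrative use
// of the bin-count form (hypothetical tensor x):
//
//   auto [hist, edges] = at::cpu::histogram(
//       x, /*bins=*/10, /*range=*/std::nullopt,
//       /*weight=*/std::nullopt, /*density=*/false);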
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_igammac_out_functional final : public at::native::structured_igammac_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_igammac(const at::Tensor & self, const at::Tensor & other) {
structured_igammac_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_igammac_out_out final : public at::native::structured_igammac_out {
    structured_igammac_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_igammac_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_igammac_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_igammac_out_inplace final : public at::native::structured_igammac_out {
    structured_igammac_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_igammac_(at::Tensor & self, const at::Tensor & other) {
structured_igammac_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("igammac", TORCH_FN(wrapper_CPU_igammac));
m.impl("igammac.out", TORCH_FN(wrapper_CPU_igammac_out_out));
m.impl("igammac_", TORCH_FN(wrapper_CPU_igammac_));
}
} // anonymous namespace
namespace cpu {
at::Tensor igammac(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igammac(self, other);
}
at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igammac_out_out(self, other, out);
}
at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_igammac_out_out(self, other, out);
}
at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igammac_(self, other);
}
} // namespace cpu
} // namespace at
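// For reference, igammac(self, other) evaluates the regularized upper
// incomplete gamma function, the complement of igamma (elementwise,
// igamma + igammac sums to 1).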
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_remainder_out_functional final : public at::native::structured_remainder_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_remainder_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_remainder_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_remainder_out_out final : public at::native::structured_remainder_out {
    structured_remainder_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_remainder_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_remainder_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_remainder_out_inplace final : public at::native::structured_remainder_out {
    structured_remainder_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_remainder__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_remainder_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("remainder.Tensor", TORCH_FN(wrapper_CPU_remainder_Tensor));
m.impl("remainder.Tensor_out", TORCH_FN(wrapper_CPU_remainder_out_Tensor_out));
m.impl("remainder_.Tensor", TORCH_FN(wrapper_CPU_remainder__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor remainder(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_remainder_Tensor(self, other);
}
at::Tensor & remainder_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_remainder_out_Tensor_out(self, other, out);
}
at::Tensor & remainder_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_remainder_out_Tensor_out(self, other, out);
}
at::Tensor & remainder_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_remainder__Tensor(self, other);
}
} // namespace cpu
} // namespace at
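// For reference, remainder follows Python's modulus semantics: the result
// takes the sign of the divisor (other), unlike fmod, which takes the sign
// of the dividend.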
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Scalar_Tensor_remainder(const at::Scalar & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::remainder(self, other);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("remainder.Scalar_Tensor",
TORCH_FN(wrapper_CPU_Scalar_Tensor_remainder));
}
} // anonymous namespace
namespace cpu {
at::Tensor remainder(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CPU_Scalar_Tensor_remainder(self, other);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_sort_stable_out_functional final : public at::native::structured_sort_stable_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_sort_stable(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
structured_sort_stable_out_functional op;
op.meta(self, stable, dim, descending);
op.impl(self, stable, dim, descending, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
struct structured_sort_stable_out_out final : public at::native::structured_sort_stable_out {
    structured_sort_stable_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_sort_out_values_stable(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
structured_sort_stable_out_out op(values, indices);
op.meta(self, stable, dim, descending);
op.impl(self, stable, dim, descending, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(values, indices);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("sort.stable", TORCH_FN(wrapper_CPU_sort_stable));
m.impl("sort.values_stable", TORCH_FN(wrapper_CPU_sort_out_values_stable));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
return wrapper_CPU_sort_stable(self, stable, dim, descending);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
return wrapper_CPU_sort_out_values_stable(self, stable, dim, descending, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
return wrapper_CPU_sort_out_values_stable(self, stable, dim, descending, values, indices);
}
} // namespace cpu
} // namespace at
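// Illustrative use (hypothetical tensor x): a stable descending sort along
// the last dimension,
//
//   auto [values, indices] = at::cpu::sort(
//       x, /*stable=*/true, /*dim=*/-1, /*descending=*/true);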
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_all_all_out_functional final : public at::native::structured_all_all_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_all(const at::Tensor & self) {
structured_all_all_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_all_all_out_out final : public at::native::structured_all_all_out {
    structured_all_all_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_all_out_all_out(const at::Tensor & self, at::Tensor & out) {
structured_all_all_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("all", TORCH_FN(wrapper_CPU_all));
m.impl("all.all_out", TORCH_FN(wrapper_CPU_all_out_all_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor all(const at::Tensor & self) {
return wrapper_CPU_all(self);
}
at::Tensor & all_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_all_out_all_out(self, out);
}
at::Tensor & all_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_all_out_all_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_renorm_out_functional final : public at::native::structured_renorm_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
structured_renorm_out_functional op;
op.meta(self, p, dim, maxnorm);
op.impl(self, p, dim, maxnorm, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_renorm_out_out final : public at::native::structured_renorm_out {
    structured_renorm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_renorm_out_out(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
structured_renorm_out_out op(out);
op.meta(self, p, dim, maxnorm);
op.impl(self, p, dim, maxnorm, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_renorm_out_inplace final : public at::native::structured_renorm_out {
    structured_renorm_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
structured_renorm_out_inplace op(self);
op.meta(self, p, dim, maxnorm);
op.impl(self, p, dim, maxnorm, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("renorm", TORCH_FN(wrapper_CPU_renorm));
m.impl("renorm.out", TORCH_FN(wrapper_CPU_renorm_out_out));
m.impl("renorm_", TORCH_FN(wrapper_CPU_renorm_));
}
} // anonymous namespace
namespace cpu {
at::Tensor renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CPU_renorm(self, p, dim, maxnorm);
}
at::Tensor & renorm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CPU_renorm_out_out(self, p, dim, maxnorm, out);
}
at::Tensor & renorm_outf(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
return wrapper_CPU_renorm_out_out(self, p, dim, maxnorm, out);
}
at::Tensor & renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CPU_renorm_(self, p, dim, maxnorm);
}
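// renorm is surfaced in all three structured-kernel flavors above:
// functional (allocates via create_out), out= (resizes the caller's tensor
// via resize_out), and in-place (validates self via check_inplace). Hedged
// example of the three call forms (tensor values are illustrative only):
//
//   at::Tensor t = at::rand({4, 5});
//   at::Tensor r = at::cpu::renorm(t, /*p=*/2, /*dim=*/0, /*maxnorm=*/1.0);
//   at::Tensor out = at::empty_like(t);
//   at::cpu::renorm_out(out, t, 2, 0, 1.0);
//   at::cpu::renorm_(t, 2, 0, 1.0);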
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__unfold_backward(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
    // No device check
  // DeviceGuard omitted
  return at::native::unfold_backward(grad_in, C10_AS_INTARRAYREF_SLOW(input_sizes), dim, size, step);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("unfold_backward",
TORCH_FN(wrapper_CPU__unfold_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
return wrapper_CPU__unfold_backward(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
}
at::Tensor unfold_backward_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
return wrapper_CPU__unfold_backward(grad_in, input_sizes, dim, size, step);
}
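// Both entry points above funnel into one SymInt-typed wrapper: the
// non-symint overload widens its IntArrayRef via c10::fromIntArrayRefSlow,
// and the wrapper lowers back to concrete ints with C10_AS_INTARRAYREF_SLOW
// before calling the native kernel. Illustrative call (shapes are
// assumptions; grad_in has the shape produced by unfold(1, 6, 4) on a
// {2, 10} input):
//
//   at::Tensor grad_in = at::rand({2, 2, 6});
//   at::Tensor g = at::cpu::unfold_backward(grad_in, /*input_sizes=*/{2, 10},
//                                           /*dim=*/1, /*size=*/6, /*step=*/4);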
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_pow_Tensor_Tensor_out_functional final : public at::native::structured_pow_Tensor_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
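// Unlike the structured ops earlier in this section, pow.Tensor_Tensor's
// meta appears to be TensorIterator-backed: each set_output override above
// therefore ends with a genuine super call to set_output_raw_strided so the
// iterator can record the tensor it will write through. Overloads whose
// meta keeps no such state (e.g. pow.Scalar below) omit the call.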
at::Tensor wrapper_CPU_pow_Tensor_Tensor(const at::Tensor & self, const at::Tensor & exponent) {
structured_pow_Tensor_Tensor_out_functional op;
op.meta(self, exponent);
op.impl(self, exponent, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_pow_Tensor_Tensor_out_out final : public at::native::structured_pow_Tensor_Tensor_out {
    structured_pow_Tensor_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_pow_out_Tensor_Tensor_out(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
structured_pow_Tensor_Tensor_out_out op(out);
op.meta(self, exponent);
op.impl(self, exponent, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_pow_Tensor_Tensor_out_inplace final : public at::native::structured_pow_Tensor_Tensor_out {
    structured_pow_Tensor_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_pow__Tensor(at::Tensor & self, const at::Tensor & exponent) {
structured_pow_Tensor_Tensor_out_inplace op(self);
op.meta(self, exponent);
op.impl(self, exponent, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("pow.Tensor_Tensor", TORCH_FN(wrapper_CPU_pow_Tensor_Tensor));
m.impl("pow.Tensor_Tensor_out", TORCH_FN(wrapper_CPU_pow_out_Tensor_Tensor_out));
m.impl("pow_.Tensor", TORCH_FN(wrapper_CPU_pow__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_Tensor_Tensor(self, exponent);
}
at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_out_Tensor_Tensor_out(self, exponent, out);
}
at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
return wrapper_CPU_pow_out_Tensor_Tensor_out(self, exponent, out);
}
at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CPU_pow__Tensor(self, exponent);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_pow_Scalar_out_functional final : public at::native::structured_pow_Scalar_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_pow_Scalar(const at::Scalar & self, const at::Tensor & exponent) {
structured_pow_Scalar_out_functional op;
op.meta(self, exponent);
op.impl(self, exponent, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_pow_Scalar_out_out final : public at::native::structured_pow_Scalar_out {
    structured_pow_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_pow_out_Scalar_out(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
structured_pow_Scalar_out_out op(out);
op.meta(self, exponent);
op.impl(self, exponent, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("pow.Scalar", TORCH_FN(wrapper_CPU_pow_Scalar));
m.impl("pow.Scalar_out", TORCH_FN(wrapper_CPU_pow_out_Scalar_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_Scalar(self, exponent);
}
at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_out_Scalar_out(self, exponent, out);
}
at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
return wrapper_CPU_pow_out_Scalar_out(self, exponent, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_pow_Tensor_Scalar_out_functional final : public at::native::structured_pow_Tensor_Scalar_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_pow_Tensor_Scalar(const at::Tensor & self, const at::Scalar & exponent) {
structured_pow_Tensor_Scalar_out_functional op;
op.meta(self, exponent);
op.impl(self, exponent, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_pow_Tensor_Scalar_out_out final : public at::native::structured_pow_Tensor_Scalar_out {
    structured_pow_Tensor_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_pow_out_Tensor_Scalar_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
structured_pow_Tensor_Scalar_out_out op(out);
op.meta(self, exponent);
op.impl(self, exponent, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_pow_Tensor_Scalar_out_inplace final : public at::native::structured_pow_Tensor_Scalar_out {
    structured_pow_Tensor_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_pow__Scalar(at::Tensor & self, const at::Scalar & exponent) {
structured_pow_Tensor_Scalar_out_inplace op(self);
op.meta(self, exponent);
op.impl(self, exponent, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("pow.Tensor_Scalar", TORCH_FN(wrapper_CPU_pow_Tensor_Scalar));
m.impl("pow.Tensor_Scalar_out", TORCH_FN(wrapper_CPU_pow_out_Tensor_Scalar_out));
m.impl("pow_.Scalar", TORCH_FN(wrapper_CPU_pow__Scalar));
}
} // anonymous namespace
namespace cpu {
at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CPU_pow_Tensor_Scalar(self, exponent);
}
at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CPU_pow_out_Tensor_Scalar_out(self, exponent, out);
}
at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
return wrapper_CPU_pow_out_Tensor_Scalar_out(self, exponent, out);
}
at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CPU_pow__Scalar(self, exponent);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__normal_(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    // No device check
  // DeviceGuard omitted
  return at::native::normal_(self, mean, std, generator);
}
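// "No device check / DeviceGuard omitted" records two codegen decisions for
// this kernel: no cross-argument device-consistency validation is emitted,
// and no device guard is installed, so the wrapper is a thin passthrough to
// at::native::normal_.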
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("normal_",
TORCH_FN(wrapper_CPU__normal_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & normal_(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
return wrapper_CPU__normal_(self, mean, std, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor_float_normal(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator) {
    // No device check
  // DeviceGuard omitted
  return at::native::normal(mean, std, generator);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Tensor_float_out_normal_out(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::normal_out(mean, std, generator, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("normal.Tensor_float",
TORCH_FN(wrapper_CPU_Tensor_float_normal));
m.impl("normal.Tensor_float_out",
TORCH_FN(wrapper_CPU_Tensor_float_out_normal_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor normal(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_float_normal(mean, std, generator);
}
at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, double std, ::std::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_float_out_normal_out(mean, std, generator, out);
}
at::Tensor & normal_outf(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_Tensor_float_out_normal_out(mean, std, generator, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_float_Tensor_normal(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
    // No device check
  // DeviceGuard omitted
  return at::native::normal(mean, std, generator);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_float_Tensor_out_normal_out(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::normal_out(mean, std, generator, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("normal.float_Tensor",
TORCH_FN(wrapper_CPU_float_Tensor_normal));
m.impl("normal.float_Tensor_out",
TORCH_FN(wrapper_CPU_float_Tensor_out_normal_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor normal(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
return wrapper_CPU_float_Tensor_normal(mean, std, generator);
}
at::Tensor & normal_out(at::Tensor & out, double mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
return wrapper_CPU_float_Tensor_out_normal_out(mean, std, generator, out);
}
at::Tensor & normal_outf(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_float_Tensor_out_normal_out(mean, std, generator, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor_Tensor_normal(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
    // No device check
  // DeviceGuard omitted
  return at::native::normal(mean, std, generator);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Tensor_Tensor_out_normal_out(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::normal_out(mean, std, generator, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("normal.Tensor_Tensor",
TORCH_FN(wrapper_CPU_Tensor_Tensor_normal));
m.impl("normal.Tensor_Tensor_out",
TORCH_FN(wrapper_CPU_Tensor_Tensor_out_normal_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor normal(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_Tensor_normal(mean, std, generator);
}
at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_Tensor_out_normal_out(mean, std, generator, out);
}
at::Tensor & normal_outf(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_Tensor_Tensor_out_normal_out(mean, std, generator, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU___amp_update_scale_(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    // No device check
  // DeviceGuard omitted
  return at::native::_amp_update_scale_cpu_(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_amp_update_scale_",
TORCH_FN(wrapper_CPU___amp_update_scale_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & _amp_update_scale_(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
return wrapper_CPU___amp_update_scale_(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}
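// _amp_update_scale_ is the mutating core of AMP gradient scaling: the
// scale grows by scale_growth_factor after growth_interval consecutive
// inf/nan-free steps and is multiplied by scale_backoff_factor whenever
// found_inf is nonzero. Hedged sketch (tensor setup is illustrative, not
// taken from this file):
//
//   at::Tensor scale = at::ones({1});
//   at::Tensor growth_tracker = at::zeros({1}, at::kInt);
//   at::Tensor found_inf = at::zeros({1});
//   at::cpu::_amp_update_scale_(scale, growth_tracker, found_inf,
//                               /*scale_growth_factor=*/2.0,
//                               /*scale_backoff_factor=*/0.5,
//                               /*growth_interval=*/2000);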
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__convert_indices_from_coo_to_csr_structured_cpu_functional final : public at::native::structured__convert_indices_from_coo_to_csr_structured_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32) {
structured__convert_indices_from_coo_to_csr_structured_cpu_functional op;
op.meta(self, size, out_int32);
op.impl(self, size, out_int32, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured__convert_indices_from_coo_to_csr_structured_cpu_out final : public at::native::structured__convert_indices_from_coo_to_csr_structured_cpu {
    structured__convert_indices_from_coo_to_csr_structured_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__convert_indices_from_coo_to_csr_out_out(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
structured__convert_indices_from_coo_to_csr_structured_cpu_out op(out);
op.meta(self, size, out_int32);
op.impl(self, size, out_int32, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_convert_indices_from_coo_to_csr", TORCH_FN(wrapper_CPU__convert_indices_from_coo_to_csr));
m.impl("_convert_indices_from_coo_to_csr.out", TORCH_FN(wrapper_CPU__convert_indices_from_coo_to_csr_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32) {
return wrapper_CPU__convert_indices_from_coo_to_csr(self, size, out_int32);
}
at::Tensor & _convert_indices_from_coo_to_csr_out(at::Tensor & out, const at::Tensor & self, int64_t size, bool out_int32) {
return wrapper_CPU__convert_indices_from_coo_to_csr_out_out(self, size, out_int32, out);
}
at::Tensor & _convert_indices_from_coo_to_csr_outf(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
return wrapper_CPU__convert_indices_from_coo_to_csr_out_out(self, size, out_int32, out);
}
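// Worked example (illustrative): for COO row indices [0, 0, 2] of a sparse
// tensor with 3 rows, the resulting CSR crow_indices are [0, 2, 2, 3]
// (cumulative row counts):
//
//   at::Tensor coo_rows = at::tensor({0, 0, 2}, at::kLong);
//   at::Tensor crow = at::cpu::_convert_indices_from_coo_to_csr(
//       coo_rows, /*size=*/3, /*out_int32=*/false);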
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // No device check
  // DeviceGuard omitted
  return at::native::multi_margin_loss_cpu_backward(grad_output, self, target, p, margin, weight, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_multi_margin_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::multi_margin_loss_cpu_backward_out(grad_output, self, target, p, margin, weight, reduction, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("multi_margin_loss_backward",
TORCH_FN(wrapper_CPU__multi_margin_loss_backward));
m.impl("multi_margin_loss_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_multi_margin_loss_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU__multi_margin_loss_backward(grad_output, self, target, p, margin, weight, reduction);
}
at::Tensor & multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU_grad_input_multi_margin_loss_backward_out(grad_output, self, target, p, margin, weight, reduction, grad_input);
}
at::Tensor & multi_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_multi_margin_loss_backward_out(grad_output, self, target, p, margin, weight, reduction, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_nll_loss_forward_out_cpu_functional final : public at::native::structured_nll_loss_forward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
structured_nll_loss_forward_out_cpu_functional op;
op.meta(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index);
op.impl(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
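// The optional weight is lowered to an at::OptionalTensorRef before it
// reaches meta/impl: a defined tensor is passed as a borrowed reference,
// while a missing or undefined tensor becomes an empty ref, avoiding a
// refcount bump on the hot path.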
struct structured_nll_loss_forward_out_cpu_out final : public at::native::structured_nll_loss_forward_out_cpu {
    structured_nll_loss_forward_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call follows here: this op's structured base class has
        // no set_output override to forward to; downstream code reads the
        // output prepared above via maybe_get_output)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_nll_loss_forward_out_output(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
structured_nll_loss_forward_out_cpu_out op(output, total_weight);
op.meta(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index);
op.impl(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(output, total_weight);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("nll_loss_forward", TORCH_FN(wrapper_CPU_nll_loss_forward));
m.impl("nll_loss_forward.output", TORCH_FN(wrapper_CPU_nll_loss_forward_out_output));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU_nll_loss_forward(self, target, weight, reduction, ignore_index);
}
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_symint(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU_nll_loss_forward(self, target, weight, reduction, ignore_index.guard_int(__FILE__, __LINE__));
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index.guard_int(__FILE__, __LINE__), output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index.guard_int(__FILE__, __LINE__), output, total_weight);
}
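// The *_symint variants above accept ignore_index as a c10::SymInt;
// guard_int(__FILE__, __LINE__) guards the symbolic value (recording the
// call site for diagnostics) and extracts a concrete int64_t, since this
// CPU kernel only consumes concrete integers.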
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__hardswish(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardswish(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_hardswish_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardswish_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__hardswish_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardswish_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("hardswish",
TORCH_FN(wrapper_CPU__hardswish));
m.impl("hardswish.out",
TORCH_FN(wrapper_CPU_out_hardswish_out));
m.impl("hardswish_",
TORCH_FN(wrapper_CPU__hardswish_));
}
} // anonymous namespace
namespace cpu {
at::Tensor hardswish(const at::Tensor & self) {
return wrapper_CPU__hardswish(self);
}
at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_hardswish_out(self, out);
}
at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_hardswish_out(self, out);
}
at::Tensor & hardswish_(at::Tensor & self) {
return wrapper_CPU__hardswish_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardswish_backward(grad_output, self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("hardswish_backward",
TORCH_FN(wrapper_CPU__hardswish_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU__hardswish_backward(grad_output, self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_softshrink_backward_out_functional final : public at::native::structured_softshrink_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
structured_softshrink_backward_out_functional op;
op.meta(grad_output, self, lambd);
op.impl(grad_output, self, lambd, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_softshrink_backward_out_out final : public at::native::structured_softshrink_backward_out {
    structured_softshrink_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_softshrink_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
structured_softshrink_backward_out_out op(grad_input);
op.meta(grad_output, self, lambd);
op.impl(grad_output, self, lambd, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
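// Illustrative sketch (not generated): the out-variant tolerates a grad_input
// whose strides differ from what meta() requests. In that case
// maybe_create_proxy allocates a scratch tensor with the requested strides,
// impl() writes into it (maybe_get_output returns the proxy), and the copy_
// above moves the result back into the caller's tensor.
// `example_softshrink_backward_out_cpu` is a hypothetical helper.
[[maybe_unused]] static at::Tensor& example_softshrink_backward_out_cpu(
    const at::Tensor& grad_output, const at::Tensor& self, at::Tensor& grad_input) {
  return wrapper_CPU_softshrink_backward_out_grad_input(
      grad_output, self, /*lambd=*/0.5, grad_input);
}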
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("softshrink_backward", TORCH_FN(wrapper_CPU_softshrink_backward));
m.impl("softshrink_backward.grad_input", TORCH_FN(wrapper_CPU_softshrink_backward_out_grad_input));
}
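// Illustrative note (not generated): the m.impl() lines above register the
// wrappers with the dispatcher under the aten::softshrink_backward schema, so
// an ordinary dispatched call on CPU tensors, e.g.
//
//   at::Tensor grad_in = at::softshrink_backward(grad_output, self, 0.5);
//
// lands on wrapper_CPU_softshrink_backward, while the at::cpu:: functions
// below invoke the same wrappers directly and bypass dispatch entirely.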
} // anonymous namespace
namespace cpu {
at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CPU_softshrink_backward(grad_output, self, lambd);
}
at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CPU_softshrink_backward_out_grad_input(grad_output, self, lambd, grad_input);
}
at::Tensor & softshrink_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
return wrapper_CPU_softshrink_backward_out_grad_input(grad_output, self, lambd, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool2d_backward_cpu(grad_output, self);
}
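// Illustrative sketch (not generated): this op is unstructured, so the
// wrapper is a plain forwarder to the hand-written kernel; there is no
// meta()/impl() split and no set_output scaffolding.
// `example_adaptive_avg_pool2d_backward_cpu` is a hypothetical helper.
[[maybe_unused]] static at::Tensor example_adaptive_avg_pool2d_backward_cpu(
    const at::Tensor& grad_output, const at::Tensor& self) {
  return wrapper_CPU___adaptive_avg_pool2d_backward(grad_output, self);
}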
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_adaptive_avg_pool2d_backward",
TORCH_FN(wrapper_CPU___adaptive_avg_pool2d_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU___adaptive_avg_pool2d_backward(grad_output, self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_adaptive_avg_pool3d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool3d_out_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("adaptive_avg_pool3d.out",
TORCH_FN(wrapper_CPU_out_adaptive_avg_pool3d_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, output_size, out);
}
at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, output_size, out);
}
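// Illustrative note (not generated): the IntArrayRef overloads above widen
// their sizes to SymInt via c10::fromIntArrayRefSlow before calling the
// wrapper, which narrows them straight back with C10_AS_INTARRAYREF_SLOW; the
// round trip lets a single wrapper serve both the concrete and the
// symbolic-shape APIs. Sample call with concrete sizes (hypothetical tensors):
//
//   at::cpu::adaptive_avg_pool3d_out(out, self, /*output_size=*/{4, 4, 4});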
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_adaptive_max_pool3d_backward_out_cpu_functional final : public at::native::structured_adaptive_max_pool3d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
structured_adaptive_max_pool3d_backward_out_cpu_functional op;
op.meta(grad_output, self, indices);
op.impl(grad_output, self, indices, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_adaptive_max_pool3d_backward_out_cpu_out final : public at::native::structured_adaptive_max_pool3d_backward_out_cpu {
    structured_adaptive_max_pool3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
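// Illustrative note (not generated, and an inference about the convention):
// the two overrides above differ deliberately. set_output_strided may swap in
// a proxy tensor when the caller's `out` cannot take the exact strides the
// meta function requested, whereas set_output_raw_strided only resizes,
// because a kernel using it has declared it can write to whatever strides the
// output actually has.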
at::Tensor & wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
structured_adaptive_max_pool3d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, self, indices);
op.impl(grad_output, self, indices, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("adaptive_max_pool3d_backward", TORCH_FN(wrapper_CPU_adaptive_max_pool3d_backward));
m.impl("adaptive_max_pool3d_backward.grad_input", TORCH_FN(wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool3d_backward(grad_output, self, indices);
}
at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_avg_pool2d_out_cpu_functional final : public at::native::structured_avg_pool2d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
structured_avg_pool2d_out_cpu_functional op;
auto precompute = op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
(void)precompute;
op.impl(self, precompute.kH, precompute.kW, precompute.dH, precompute.dW, precompute.padH, precompute.padW, ceil_mode, count_include_pad, divisor_override, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
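// Illustrative sketch (not generated): avg_pool2d is a structured kernel with
// precomputed meta values; meta() returns a struct of already-validated
// scalars (kH/kW, dH/dW, padH/padW) that impl() consumes instead of
// re-parsing the IntArrayRef arguments. The `(void)precompute;` cast appears
// to be codegen boilerplate for suppressing unused-variable warnings.
// `example_avg_pool2d_cpu` is a hypothetical helper with sample pooling
// parameters.
[[maybe_unused]] static at::Tensor example_avg_pool2d_cpu(const at::Tensor& self) {
  return wrapper_CPU_avg_pool2d(self, /*kernel_size=*/{3, 3}, /*stride=*/{2, 2},
                                /*padding=*/{0, 0}, /*ceil_mode=*/false,
                                /*count_include_pad=*/true,
                                /*divisor_override=*/::std::nullopt);
}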
struct structured_avg_pool2d_out_cpu_out final : public at::native::structured_avg_pool2d_out_cpu {
    structured_avg_pool2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_avg_pool2d_out_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
structured_avg_pool2d_out_cpu_out op(out);
auto precompute = op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
(void)precompute;
op.impl(self, precompute.kH, precompute.kW, precompute.dH, precompute.dW, precompute.padH, precompute.padW, ceil_mode, count_include_pad, divisor_override, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("avg_pool2d", TORCH_FN(wrapper_CPU_avg_pool2d));
m.impl("avg_pool2d.out", TORCH_FN(wrapper_CPU_avg_pool2d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d_out_out(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}
at::Tensor & avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
return wrapper_CPU_avg_pool2d_out_out(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_avg_pool3d_backward_out_cpu_functional final : public at::native::structured_avg_pool3d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
structured_avg_pool3d_backward_out_cpu_functional op;
op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_avg_pool3d_backward_out_cpu_out final : public at::native::structured_avg_pool3d_backward_out_cpu {
    structured_avg_pool3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_avg_pool3d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
structured_avg_pool3d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("avg_pool3d_backward", TORCH_FN(wrapper_CPU_avg_pool3d_backward));
m.impl("avg_pool3d_backward.grad_input", TORCH_FN(wrapper_CPU_avg_pool3d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool3d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool3d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
at::Tensor & avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
return wrapper_CPU_avg_pool3d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_fractional_max_pool2d_out_cpu_functional final : public at::native::structured_fractional_max_pool2d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
structured_fractional_max_pool2d_out_cpu_functional op;
op.meta(self, kernel_size, output_size, random_samples);
op.impl(self, kernel_size, output_size, random_samples, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
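// Illustrative sketch (not generated): a two-output structured kernel; the
// outputs_ array above has arity 2 and the wrapper moves both tensors out.
// `example_fractional_max_pool2d_cpu` is a hypothetical helper; the 2x2
// kernel and 8x8 output size are sample parameters.
[[maybe_unused]] static ::std::tuple<at::Tensor, at::Tensor>
example_fractional_max_pool2d_cpu(const at::Tensor& self,
                                  const at::Tensor& random_samples) {
  return wrapper_CPU_fractional_max_pool2d(self, /*kernel_size=*/{2, 2},
                                           /*output_size=*/{8, 8}, random_samples);
}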
struct structured_fractional_max_pool2d_out_cpu_out final : public at::native::structured_fractional_max_pool2d_out_cpu {
    structured_fractional_max_pool2d_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_fractional_max_pool2d_out_output(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
structured_fractional_max_pool2d_out_cpu_out op(output, indices);
op.meta(self, kernel_size, output_size, random_samples);
op.impl(self, kernel_size, output_size, random_samples, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(output, indices);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fractional_max_pool2d", TORCH_FN(wrapper_CPU_fractional_max_pool2d));
m.impl("fractional_max_pool2d.output", TORCH_FN(wrapper_CPU_fractional_max_pool2d_out_output));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CPU_fractional_max_pool2d(self, kernel_size, output_size, random_samples);
}
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CPU_fractional_max_pool2d_out_output(self, kernel_size, output_size, random_samples, output, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
return wrapper_CPU_fractional_max_pool2d_out_output(self, kernel_size, output_size, random_samples, output, indices);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_reflection_pad3d_backward_out_cpu_functional final : public at::native::structured_reflection_pad3d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
structured_reflection_pad3d_backward_out_cpu_functional op;
op.meta(grad_output, self, padding);
op.impl(grad_output, self, padding, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_reflection_pad3d_backward_out_cpu_out final : public at::native::structured_reflection_pad3d_backward_out_cpu {
    structured_reflection_pad3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_reflection_pad3d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
structured_reflection_pad3d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, self, padding);
op.impl(grad_output, self, padding, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("reflection_pad3d_backward", TORCH_FN(wrapper_CPU_reflection_pad3d_backward));
m.impl("reflection_pad3d_backward.grad_input", TORCH_FN(wrapper_CPU_reflection_pad3d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward(grad_output, self, padding);
}
at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
}
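// Illustrative note (not generated): here the conversion runs in the opposite
// direction to the adaptive_avg_pool3d wrappers above; the symint entry
// points narrow SymIntArrayRef to IntArrayRef with C10_AS_INTARRAYREF_SLOW,
// which guards on every element having a concrete integer value (hence the
// SLOW suffix), before reusing the concrete-int wrapper.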
at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, padding, grad_input);
}
at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, padding, grad_input);
}
at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
}
at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_replication_pad2d_out_cpu_functional final : public at::native::structured_replication_pad2d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
structured_replication_pad2d_out_cpu_functional op;
op.meta(self, padding);
op.impl(self, padding, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_replication_pad2d_out_cpu_out final : public at::native::structured_replication_pad2d_out_cpu {
    structured_replication_pad2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_replication_pad2d_out_out(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
structured_replication_pad2d_out_cpu_out op(out);
op.meta(self, padding);
op.impl(self, padding, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("replication_pad2d", TORCH_FN(wrapper_CPU_replication_pad2d));
m.impl("replication_pad2d.out", TORCH_FN(wrapper_CPU_replication_pad2d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad2d(self, padding);
}
at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad2d(self, C10_AS_INTARRAYREF_SLOW(padding));
}
at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad2d_out_out(self, padding, out);
}
at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad2d_out_out(self, padding, out);
}
at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad2d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad2d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__upsample_bilinear2d_aa_backward_out_cpu_functional final : public at::native::structured__upsample_bilinear2d_aa_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured__upsample_bilinear2d_aa_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured__upsample_bilinear2d_aa_backward_out_cpu_out final : public at::native::structured__upsample_bilinear2d_aa_backward_out_cpu {
    structured__upsample_bilinear2d_aa_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
structured__upsample_bilinear2d_aa_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_upsample_bilinear2d_aa_backward", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa_backward));
m.impl("_upsample_bilinear2d_aa_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__upsample_bicubic2d_aa_out_cpu_functional final : public at::native::structured__upsample_bicubic2d_aa_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured__upsample_bicubic2d_aa_out_cpu_functional op;
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured__upsample_bicubic2d_aa_out_cpu_out final : public at::native::structured__upsample_bicubic2d_aa_out_cpu {
    structured__upsample_bicubic2d_aa_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__upsample_bicubic2d_aa_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
structured__upsample_bicubic2d_aa_out_cpu_out op(out);
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_upsample_bicubic2d_aa", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa));
m.impl("_upsample_bicubic2d_aa.out", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_trilinear3d_out_cpu_functional final : public at::native::structured_upsample_trilinear3d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_trilinear3d_out_cpu_functional op;
op.meta(self, output_size, align_corners, scales_d, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_d, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_trilinear3d_out_cpu_out final : public at::native::structured_upsample_trilinear3d_out_cpu {
    structured_upsample_trilinear3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is generated here: this op's meta class does not
        // inherit a set_output implementation to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_trilinear3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
structured_upsample_trilinear3d_out_cpu_out op(out);
op.meta(self, output_size, align_corners, scales_d, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_d, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_trilinear3d", TORCH_FN(wrapper_CPU_upsample_trilinear3d));
m.impl("upsample_trilinear3d.out", TORCH_FN(wrapper_CPU_upsample_trilinear3d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d(self, output_size, align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_d, scales_h, scales_w);
}
at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_d, scales_h, scales_w, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__upsample_nearest_exact1d_backward_out_cpu_functional final : public at::native::structured__upsample_nearest_exact1d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales) {
structured__upsample_nearest_exact1d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, scales);
op.impl(grad_output, output_size, input_size, scales, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
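// The functional wrapper above never touches caller storage: meta() lands in
// one of the set_output overrides, each of which allocates through
// create_out() and propagates any dimension names, and the freshly built
// tensor is then moved out to the caller.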
struct structured__upsample_nearest_exact1d_backward_out_cpu_out final : public at::native::structured__upsample_nearest_exact1d_backward_out_cpu {
    structured__upsample_nearest_exact1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
structured__upsample_nearest_exact1d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, scales);
op.impl(grad_output, output_size, input_size, scales, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_upsample_nearest_exact1d_backward", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d_backward));
m.impl("_upsample_nearest_exact1d_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward(grad_output, output_size, input_size, scales);
}
at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales);
}
at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_nearest2d_backward_out_cpu_functional final : public at::native::structured_upsample_nearest2d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_nearest2d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_nearest2d_backward_out_cpu_out final : public at::native::structured_upsample_nearest2d_backward_out_cpu {
    structured_upsample_nearest2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_nearest2d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
structured_upsample_nearest2d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_nearest2d_backward", TORCH_FN(wrapper_CPU_upsample_nearest2d_backward));
m.impl("upsample_nearest2d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_nearest2d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward(grad_output, output_size, input_size, scales_h, scales_w);
}
at::Tensor upsample_nearest2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w);
}
at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_tanh_backward_out_functional final : public at::native::structured_tanh_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
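// Unlike the upsample wrappers earlier in this section, each set_output
// override in this struct (and in its `_out` counterpart below) ends with an
// explicit call into
// at::native::structured_tanh_backward_out::set_output_raw_strided; for an
// elementwise op like tanh_backward the base class presumably configures a
// TensorIterator when the output is set, so the "super" call actually has
// work to do.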
at::Tensor wrapper_CPU_tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
structured_tanh_backward_out_functional op;
op.meta(grad_output, output);
op.impl(grad_output, output, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_tanh_backward_out_out final : public at::native::structured_tanh_backward_out {
    structured_tanh_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_tanh_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
structured_tanh_backward_out_out op(grad_input);
op.meta(grad_output, output);
op.impl(grad_output, output, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("tanh_backward", TORCH_FN(wrapper_CPU_tanh_backward));
m.impl("tanh_backward.grad_input", TORCH_FN(wrapper_CPU_tanh_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
return wrapper_CPU_tanh_backward(grad_output, output);
}
at::Tensor & tanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
return wrapper_CPU_tanh_backward_out_grad_input(grad_output, output, grad_input);
}
at::Tensor & tanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
return wrapper_CPU_tanh_backward_out_grad_input(grad_output, output, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__col2im(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
  // No device check
  // DeviceGuard omitted
  return at::native::col2im_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), kernel_size, dilation, padding, stride);
}
} // anonymous namespace
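// NOTE: col2im (and im2col below) are unstructured kernels: the generated
// wrapper is a thin trampoline straight into at::native::*_cpu, with no
// meta()/impl() split and no output-allocation machinery. The "No device
// check" / "DeviceGuard omitted" comments reflect that a CPU-only entry
// point has no device state to validate or pin.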
namespace {
at::Tensor & wrapper_CPU_out_col2im_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::col2im_out_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), kernel_size, dilation, padding, stride, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("col2im",
TORCH_FN(wrapper_CPU__col2im));
m.impl("col2im.out",
TORCH_FN(wrapper_CPU_out_col2im_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU__col2im(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
}
at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU__col2im(self, output_size, kernel_size, dilation, padding, stride);
}
at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU_out_col2im_out(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
}
at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CPU_out_col2im_out(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
}
at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU_out_col2im_out(self, output_size, kernel_size, dilation, padding, stride, out);
}
at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CPU_out_col2im_out(self, output_size, kernel_size, dilation, padding, stride, out);
}
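// Note the direction of conversion here: this wrapper's signature is
// symbolic (c10::SymIntArrayRef output_size), so the plain-int public
// overloads wrap their argument up with c10::fromIntArrayRefSlow, and the
// wrapper converts back down with C10_AS_INTARRAYREF_SLOW before calling the
// kernel. This is the mirror image of upsample_trilinear3d above, whose
// wrapper takes concrete ints and whose `_symint` overloads convert down.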
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
  // No device check
  // DeviceGuard omitted
  return at::native::im2col_cpu(self, kernel_size, dilation, padding, stride);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_im2col_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::im2col_out_cpu(self, kernel_size, dilation, padding, stride, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("im2col",
TORCH_FN(wrapper_CPU__im2col));
m.impl("im2col.out",
TORCH_FN(wrapper_CPU_out_im2col_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU__im2col(self, kernel_size, dilation, padding, stride);
}
at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU_out_im2col_out(self, kernel_size, dilation, padding, stride, out);
}
at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CPU_out_im2col_out(self, kernel_size, dilation, padding, stride, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_isneginf_out_functional final : public at::native::structured_isneginf_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_isneginf(const at::Tensor & self) {
structured_isneginf_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_isneginf_out_out final : public at::native::structured_isneginf_out {
    structured_isneginf_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_isneginf_out_out(const at::Tensor & self, at::Tensor & out) {
structured_isneginf_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("isneginf", TORCH_FN(wrapper_CPU_isneginf));
m.impl("isneginf.out", TORCH_FN(wrapper_CPU_isneginf_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor isneginf(const at::Tensor & self) {
return wrapper_CPU_isneginf(self);
}
at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_isneginf_out_out(self, out);
}
at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_isneginf_out_out(self, out);
}
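// The two cpu:: entry points above differ only in argument order: `_out`
// takes the destination first, `_outf` takes it last, the convention used
// throughout this file. A hypothetical call (illustrative only; `x` is any
// floating-point tensor):
//
//   at::Tensor mask = at::empty({0}, at::kBool);
//   at::cpu::isneginf_out(mask, x);  // resized to x.sizes(), filled per element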
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_linalg_lu_factor_ex_out_functional final : public at::native::structured_linalg_lu_factor_ex_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 3> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_linalg_lu_factor_ex(const at::Tensor & A, bool pivot, bool check_errors) {
structured_linalg_lu_factor_ex_out_functional op;
op.meta(A, pivot, check_errors);
op.impl(A, pivot, check_errors, op.outputs_[0], op.outputs_[1], op.outputs_[2]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]));
}
struct structured_linalg_lu_factor_ex_out_out final : public at::native::structured_linalg_lu_factor_ex_out {
    structured_linalg_lu_factor_ex_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 3> outputs_;
    std::array<::std::optional<Tensor>, 3> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_linalg_lu_factor_ex_out_out(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
structured_linalg_lu_factor_ex_out_out op(LU, pivots, info);
op.meta(A, pivot, check_errors);
op.impl(A, pivot, check_errors, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
return std::forward_as_tuple(LU, pivots, info);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_lu_factor_ex", TORCH_FN(wrapper_CPU_linalg_lu_factor_ex));
m.impl("linalg_lu_factor_ex.out", TORCH_FN(wrapper_CPU_linalg_lu_factor_ex_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot, bool check_errors) {
return wrapper_CPU_linalg_lu_factor_ex(A, pivot, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot, bool check_errors) {
return wrapper_CPU_linalg_lu_factor_ex_out_out(A, pivot, check_errors, LU, pivots, info);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
return wrapper_CPU_linalg_lu_factor_ex_out_out(A, pivot, check_errors, LU, pivots, info);
}
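// Three-output structured op: outputs_ has rank 3, the functional wrapper
// moves (LU, pivots, info) into a std::tuple, and the out variant returns
// the caller's tensors through std::forward_as_tuple so the returned
// references stay bound to LU, pivots, and info.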
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_linalg_ldl_solve_out_functional final : public at::native::structured_linalg_ldl_solve_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
structured_linalg_ldl_solve_out_functional op;
op.meta(LD, pivots, B, hermitian);
op.impl(LD, pivots, B, hermitian, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_linalg_ldl_solve_out_out final : public at::native::structured_linalg_ldl_solve_out {
    structured_linalg_ldl_solve_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_linalg_ldl_solve_out_out(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
structured_linalg_ldl_solve_out_out op(out);
op.meta(LD, pivots, B, hermitian);
op.impl(LD, pivots, B, hermitian, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_ldl_solve", TORCH_FN(wrapper_CPU_linalg_ldl_solve));
m.impl("linalg_ldl_solve.out", TORCH_FN(wrapper_CPU_linalg_ldl_solve_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
return wrapper_CPU_linalg_ldl_solve(LD, pivots, B, hermitian);
}
at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
return wrapper_CPU_linalg_ldl_solve_out_out(LD, pivots, B, hermitian, out);
}
at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
return wrapper_CPU_linalg_ldl_solve_out_out(LD, pivots, B, hermitian, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__linalg_slogdet_out_functional final : public at::native::structured__linalg_slogdet_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 4> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_slogdet(const at::Tensor & A) {
structured__linalg_slogdet_out_functional op;
op.meta(A);
op.impl(A, op.outputs_[0], op.outputs_[1], op.outputs_[2], op.outputs_[3]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]), std::move(op.outputs_[3]));
}
struct structured__linalg_slogdet_out_out final : public at::native::structured__linalg_slogdet_out {
    structured__linalg_slogdet_out_out(Tensor& out0, Tensor& out1, Tensor& out2, Tensor& out3) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2), std::ref(out3) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 4> outputs_;
    std::array<::std::optional<Tensor>, 4> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_slogdet_out_sign(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
structured__linalg_slogdet_out_out op(sign, logabsdet, LU, pivots);
op.meta(A);
op.impl(A, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2), op.maybe_get_output(3));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
if (op.proxy_outputs_[3].has_value()) op.outputs_[3].get().copy_(*op.proxy_outputs_[3]);
return std::forward_as_tuple(sign, logabsdet, LU, pivots);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_linalg_slogdet", TORCH_FN(wrapper_CPU__linalg_slogdet));
m.impl("_linalg_slogdet.sign", TORCH_FN(wrapper_CPU__linalg_slogdet_out_sign));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A) {
return wrapper_CPU__linalg_slogdet(A);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
return wrapper_CPU__linalg_slogdet_out_sign(A, sign, logabsdet, LU, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
return wrapper_CPU__linalg_slogdet_out_sign(A, sign, logabsdet, LU, pivots);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_linalg_eigvals_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvals_out(self, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_eigvals.out",
TORCH_FN(wrapper_CPU_out_linalg_eigvals_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_linalg_eigvals_out(self, out);
}
at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_linalg_eigvals_out(self, out);
}
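// NOTE: only linalg_eigvals.out is registered for CPU in this section; no
// functional wrapper_CPU_linalg_eigvals appears here, so the functional form
// is presumably provided elsewhere (for example as a composite that routes
// through this out= kernel).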
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__linalg_householder_product(const at::Tensor & input, const at::Tensor & tau) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_householder_product(input, tau);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_linalg_householder_product_out(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_householder_product_out(input, tau, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_householder_product",
TORCH_FN(wrapper_CPU__linalg_householder_product));
m.impl("linalg_householder_product.out",
TORCH_FN(wrapper_CPU_out_linalg_householder_product_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau) {
return wrapper_CPU__linalg_householder_product(input, tau);
}
at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau) {
return wrapper_CPU_out_linalg_householder_product_out(input, tau, out);
}
at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
return wrapper_CPU_out_linalg_householder_product_out(input, tau, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_linalg_vector_norm_out_functional final : public at::native::structured_linalg_vector_norm_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
structured_linalg_vector_norm_out_functional op;
op.meta(self, ord, dim, keepdim, dtype);
op.impl(self, ord, dim, keepdim, dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_linalg_vector_norm_out_out final : public at::native::structured_linalg_vector_norm_out {
    structured_linalg_vector_norm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_linalg_vector_norm_out_out(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
structured_linalg_vector_norm_out_out op(out);
op.meta(self, ord, dim, keepdim, dtype);
op.impl(self, ord, dim, keepdim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_vector_norm", TORCH_FN(wrapper_CPU_linalg_vector_norm));
m.impl("linalg_vector_norm.out", TORCH_FN(wrapper_CPU_linalg_vector_norm_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_linalg_vector_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_linalg_vector_norm_out_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CPU_linalg_vector_norm_out_out(self, ord, dim, keepdim, dtype, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__linalg_solve_ex_out_functional final : public at::native::structured__linalg_solve_ex_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 4> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
structured__linalg_solve_ex_out_functional op;
op.meta(A, B, left, check_errors);
op.impl(A, B, left, check_errors, op.outputs_[0], op.outputs_[1], op.outputs_[2], op.outputs_[3]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]), std::move(op.outputs_[3]));
}
struct structured__linalg_solve_ex_out_out final : public at::native::structured__linalg_solve_ex_out {
    structured__linalg_solve_ex_out_out(Tensor& out0, Tensor& out1, Tensor& out2, Tensor& out3) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2), std::ref(out3) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 4> outputs_;
    std::array<::std::optional<Tensor>, 4> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_solve_ex_out_result(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
structured__linalg_solve_ex_out_out op(result, LU, pivots, info);
op.meta(A, B, left, check_errors);
op.impl(A, B, left, check_errors, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2), op.maybe_get_output(3));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
if (op.proxy_outputs_[3].has_value()) op.outputs_[3].get().copy_(*op.proxy_outputs_[3]);
return std::forward_as_tuple(result, LU, pivots, info);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_linalg_solve_ex", TORCH_FN(wrapper_CPU__linalg_solve_ex));
m.impl("_linalg_solve_ex.result", TORCH_FN(wrapper_CPU__linalg_solve_ex_out_result));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
return wrapper_CPU__linalg_solve_ex(A, B, left, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out(at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
return wrapper_CPU__linalg_solve_ex_out_result(A, B, left, check_errors, result, LU, pivots, info);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
return wrapper_CPU__linalg_solve_ex_out_result(A, B, left, check_errors, result, LU, pivots, info);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___test_optional_filled_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_optional_intlist(values, addends);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_test_optional_filled_intlist",
TORCH_FN(wrapper_CPU___test_optional_filled_intlist));
}
} // anonymous namespace
namespace cpu {
at::Tensor _test_optional_filled_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
return wrapper_CPU___test_optional_filled_intlist(values, addends);
}
} // namespace cpu
} // namespace at
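// Editor's note (illustrative, not generated): at::OptionalIntArrayRef is
// constructible from ::std::nullopt or an initializer list, so the wrapper
// above can be exercised either way. A sketch, assuming `values` is the int
// tensor this test kernel expects:
//
//   at::Tensor values = at::tensor({1, 2}, at::kInt);
//   at::Tensor a = at::cpu::_test_optional_filled_intlist(values, {10, 10});
//   at::Tensor b = at::cpu::_test_optional_filled_intlist(values, ::std::nullopt);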
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__segment_reduce(const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial) {
    // No device check
  // DeviceGuard omitted
  return at::native::segment_reduce_kernel(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("segment_reduce",
TORCH_FN(wrapper_CPU__segment_reduce));
}
} // anonymous namespace
namespace cpu {
at::Tensor segment_reduce(const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial) {
return wrapper_CPU__segment_reduce(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
}
} // namespace cpu
} // namespace at
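// Editor's note (illustrative, not generated): segment_reduce reduces `data`
// along `axis` over segments described by lengths or offsets (lengths in this
// sketch):
//
//   at::Tensor data = at::tensor({1., 2., 3., 4., 5., 6.});
//   at::Tensor lengths = at::tensor({2, 4});
//   at::Tensor out = at::cpu::segment_reduce(
//       data, "sum", lengths, ::std::nullopt, ::std::nullopt,
//       /*axis=*/0, /*unsafe=*/false, /*initial=*/::std::nullopt);
//   // out == tensor([ 3., 18.])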
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___padded_dense_to_jagged_forward(const at::Tensor & dense, at::TensorList offsets, ::std::optional<c10::SymInt> total_L) {
    // No device check
  // DeviceGuard omitted
  return at::native::_padded_dense_to_jagged_forward_cpu(dense, offsets, total_L.has_value() ? ::std::make_optional(total_L->guard_int(__FILE__, __LINE__)) : ::std::nullopt);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_padded_dense_to_jagged_forward",
TORCH_FN(wrapper_CPU___padded_dense_to_jagged_forward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _padded_dense_to_jagged_forward(const at::Tensor & dense, at::TensorList offsets, ::std::optional<int64_t> total_L) {
return wrapper_CPU___padded_dense_to_jagged_forward(dense, offsets, total_L.has_value() ? ::std::make_optional(c10::SymInt(*total_L)) : ::std::nullopt);
}
at::Tensor _padded_dense_to_jagged_forward_symint(const at::Tensor & dense, at::TensorList offsets, ::std::optional<c10::SymInt> total_L) {
return wrapper_CPU___padded_dense_to_jagged_forward(dense, offsets, total_L);
}
} // namespace cpu
} // namespace at
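// Editor's note (illustrative, not generated): the two entry points above are
// the concrete-int and symbolic-int spellings of the same op. The int64_t
// overload lifts total_L into c10::SymInt on the way in, and the wrapper's
// guard_int(__FILE__, __LINE__) call forces the SymInt back to a concrete
// value (installing a guard under symbolic tracing) before the CPU kernel
// runs:
//
//   ::std::optional<int64_t> total_L = 42;
//   // at::cpu::_padded_dense_to_jagged_forward(dense, offsets, total_L)
//   //   -> wrapper(..., ::std::make_optional(c10::SymInt(*total_L)))
//   //   -> kernel(..., total_L->guard_int(__FILE__, __LINE__))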
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___scaled_dot_product_flash_attention_for_cpu(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
    // No device check
  // DeviceGuard omitted
  return at::native::_scaled_dot_product_flash_attention_cpu(query, key, value, dropout_p, is_causal, attn_mask, scale);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_scaled_dot_product_flash_attention_for_cpu",
TORCH_FN(wrapper_CPU___scaled_dot_product_flash_attention_for_cpu));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
return wrapper_CPU___scaled_dot_product_flash_attention_for_cpu(query, key, value, dropout_p, is_causal, attn_mask, scale);
}
} // namespace cpu
} // namespace at
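// Editor's note (illustrative, not generated): a sketch of calling the CPU
// flash-attention entry point above. Shapes here assume the conventional
// (batch, num_heads, seq_len, head_dim) layout; the returned pair is the
// attention output and the logsumexp tensor:
//
//   at::Tensor q = at::rand({1, 2, 8, 16});
//   at::Tensor k = at::rand({1, 2, 8, 16});
//   at::Tensor v = at::rand({1, 2, 8, 16});
//   auto [out, lse] = at::cpu::_scaled_dot_product_flash_attention_for_cpu(
//       q, k, v, /*dropout_p=*/0.0, /*is_causal=*/true,
//       /*attn_mask=*/::std::nullopt, /*scale=*/::std::nullopt);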
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_bessel_j1_out_functional final : public at::native::structured_special_bessel_j1_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_bessel_j1(const at::Tensor & self) {
structured_special_bessel_j1_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_bessel_j1_out_out final : public at::native::structured_special_bessel_j1_out {
    structured_special_bessel_j1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_bessel_j1_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_bessel_j1_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_bessel_j1", TORCH_FN(wrapper_CPU_special_bessel_j1));
m.impl("special_bessel_j1.out", TORCH_FN(wrapper_CPU_special_bessel_j1_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_bessel_j1(const at::Tensor & self) {
return wrapper_CPU_special_bessel_j1(self);
}
at::Tensor & special_bessel_j1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_bessel_j1_out_out(self, out);
}
at::Tensor & special_bessel_j1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_bessel_j1_out_out(self, out);
}
} // namespace cpu
} // namespace at
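// Editor's note (illustrative, not generated): this is the standard structured
// kernel expansion. The *_functional struct owns its outputs and allocates
// them with create_out; the *_out_out struct wraps caller tensors, resizing
// them and falling back to a copied-back proxy when they cannot be written in
// place. The three at::cpu entry points differ only in argument order:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor y = at::cpu::special_bessel_j1(x);    // allocates the result
//   at::Tensor out = at::empty({4});
//   at::cpu::special_bessel_j1_out(out, x);          // out-first signature
//   at::cpu::special_bessel_j1_outf(x, out);         // out-last signature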
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_bessel_y1_out_functional final : public at::native::structured_special_bessel_y1_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_bessel_y1(const at::Tensor & self) {
structured_special_bessel_y1_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_bessel_y1_out_out final : public at::native::structured_special_bessel_y1_out {
    structured_special_bessel_y1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_bessel_y1_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_bessel_y1_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_bessel_y1", TORCH_FN(wrapper_CPU_special_bessel_y1));
m.impl("special_bessel_y1.out", TORCH_FN(wrapper_CPU_special_bessel_y1_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_bessel_y1(const at::Tensor & self) {
return wrapper_CPU_special_bessel_y1(self);
}
at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_bessel_y1_out_out(self, out);
}
at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_bessel_y1_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_legendre_polynomial_p_out_functional final : public at::native::structured_special_legendre_polynomial_p_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n) {
structured_special_legendre_polynomial_p_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_legendre_polynomial_p_out_out final : public at::native::structured_special_legendre_polynomial_p_out {
    structured_special_legendre_polynomial_p_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_legendre_polynomial_p_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_legendre_polynomial_p_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_legendre_polynomial_p", TORCH_FN(wrapper_CPU_special_legendre_polynomial_p));
m.impl("special_legendre_polynomial_p.out", TORCH_FN(wrapper_CPU_special_legendre_polynomial_p_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_legendre_polynomial_p(x, n);
}
at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_legendre_polynomial_p_out_out(x, n, out);
}
at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_legendre_polynomial_p_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
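// Editor's note (illustrative, not generated): unlike the unary Bessel
// wrappers above, this structured kernel is binary; the meta function
// broadcasts x against the degree tensor n. A sketch evaluating P_2 at five
// points:
//
//   at::Tensor x = at::linspace(-1.0, 1.0, 5);
//   at::Tensor n = at::tensor({2.});
//   at::Tensor p = at::cpu::special_legendre_polynomial_p(x, n);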
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_modified_bessel_i0_out_functional final : public at::native::structured_special_modified_bessel_i0_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_modified_bessel_i0(const at::Tensor & self) {
structured_special_modified_bessel_i0_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_modified_bessel_i0_out_out final : public at::native::structured_special_modified_bessel_i0_out {
    structured_special_modified_bessel_i0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_modified_bessel_i0_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_modified_bessel_i0_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_modified_bessel_i0", TORCH_FN(wrapper_CPU_special_modified_bessel_i0));
m.impl("special_modified_bessel_i0.out", TORCH_FN(wrapper_CPU_special_modified_bessel_i0_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_modified_bessel_i0(const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_i0(self);
}
at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_i0_out_out(self, out);
}
at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_modified_bessel_i0_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_shifted_chebyshev_polynomial_t_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_t_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
structured_special_shifted_chebyshev_polynomial_t_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_shifted_chebyshev_polynomial_t_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_t_out {
    structured_special_shifted_chebyshev_polynomial_t_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_shifted_chebyshev_polynomial_t_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_shifted_chebyshev_polynomial_t", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_t));
m.impl("special_shifted_chebyshev_polynomial_t.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_t(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
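// Editor's note (illustrative, not generated): the shifted Chebyshev wrappers
// in this file (kind T above, kinds U and W below) are instantiations of the
// same binary pattern as the Legendre wrapper; only the registered kernel
// differs. The shifted polynomials are conventionally evaluated on [0, 1]:
//
//   at::Tensor x = at::rand({4});   // samples already in [0, 1]
//   at::Tensor t =
//       at::cpu::special_shifted_chebyshev_polynomial_t(x, at::tensor({3.}));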
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_shifted_chebyshev_polynomial_u_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_u_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
structured_special_shifted_chebyshev_polynomial_u_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_shifted_chebyshev_polynomial_u_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_u_out {
    structured_special_shifted_chebyshev_polynomial_u_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_shifted_chebyshev_polynomial_u_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_shifted_chebyshev_polynomial_u", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_u));
m.impl("special_shifted_chebyshev_polynomial_u.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_u(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_shifted_chebyshev_polynomial_w_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_w_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
structured_special_shifted_chebyshev_polynomial_w_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_shifted_chebyshev_polynomial_w_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_w_out {
    structured_special_shifted_chebyshev_polynomial_w_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_shifted_chebyshev_polynomial_w_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_shifted_chebyshev_polynomial_w", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_w));
m.impl("special_shifted_chebyshev_polynomial_w.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_w(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
void wrapper_CPU___fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fused_adam_kernel_cpu_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fused_adam_",
TORCH_FN(wrapper_CPU___fused_adam_));
}
} // anonymous namespace
namespace cpu {
void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
return wrapper_CPU___fused_adam_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
void wrapper_CPU_tensor_lr__fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // No device check
  // DeviceGuard omitted
  return at::native::_fused_adam_kernel_cpu_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fused_adam_.tensor_lr",
TORCH_FN(wrapper_CPU_tensor_lr__fused_adam_));
}
} // anonymous namespace
namespace cpu {
void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
return wrapper_CPU_tensor_lr__fused_adam_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // namespace cpu
} // namespace at
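// Editor's note (illustrative, not generated): the two registrations above
// reach the same at::native::_fused_adam_kernel_cpu_; they differ only in
// whether lr arrives as a plain double or as a scalar Tensor, and C++
// overload resolution on at::cpu::_fused_adam_ selects between them. A
// single-parameter sketch, assuming float scalar step counters as the fused
// kernels expect:
//
//   std::vector<at::Tensor> params{at::rand({2})}, grads{at::rand({2})},
//       exp_avgs{at::zeros({2})}, exp_avg_sqs{at::zeros({2})},
//       max_exp_avg_sqs{}, state_steps{at::ones({})};
//   at::cpu::_fused_adam_(params, grads, exp_avgs, exp_avg_sqs,
//       max_exp_avg_sqs, state_steps, /*lr=*/1e-3, /*beta1=*/0.9,
//       /*beta2=*/0.999, /*weight_decay=*/0.0, /*eps=*/1e-8,
//       /*amsgrad=*/false, /*maximize=*/false,
//       /*grad_scale=*/::std::nullopt, /*found_inf=*/::std::nullopt);
//   // tensor_lr overload: pass at::scalar_tensor(1e-3) in place of the double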
