// Required for old g++ versions to compile PRId64 macros; see
// https://github.com/pytorch/pytorch/issues/3571 for context.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// An external backend might generate this file within its own code tree
// and run clang-format over all source files in that tree, so disable
// clang-format here since the backend might use a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries; it
//       just excludes external projects such as torch_xla, which
//       reuse some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB)        || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB)    || \
    defined(TORCH_HIP_BUILD_MAIN_LIB)     || \
    defined(TORCH_XPU_BUILD_MAIN_LIB)     || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif
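
// Defining TORCH_ASSERT_ONLY_METHOD_OPERATORS (above) makes the monolithic
// ATen operator headers such as ATen/Functions.h fail to compile if they
// are included, steering this file toward the fine-grained per-operator
// <ATen/ops/*.h> includes used below and keeping incremental rebuilds fast.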

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <optional>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/EmptyTensor.h>
#include <c10/macros/Macros.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_add_relu_cpu_dispatch.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_addmm_activation_cpu_dispatch.h>
#include <ATen/ops/_addmm_activation_native.h>
#include <ATen/ops/_aminmax_cpu_dispatch.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cpu_dispatch.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_update_scale_cpu_dispatch.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_assert_async_cpu_dispatch.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_batch_norm_with_update_cpu_dispatch.h>
#include <ATen/ops/_batch_norm_with_update_native.h>
#include <ATen/ops/_cdist_backward_cpu_dispatch.h>
#include <ATen/ops/_cdist_backward_native.h>
#include <ATen/ops/_cdist_forward_cpu_dispatch.h>
#include <ATen/ops/_cdist_forward_native.h>
#include <ATen/ops/_cholesky_solve_helper_cpu_dispatch.h>
#include <ATen/ops/_cholesky_solve_helper_native.h>
#include <ATen/ops/_compute_linear_combination_cpu_dispatch.h>
#include <ATen/ops/_compute_linear_combination_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_cpu_dispatch.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_native.h>
#include <ATen/ops/_ctc_loss_backward_cpu_dispatch.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_ctc_loss_cpu_dispatch.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_cummax_helper_cpu_dispatch.h>
#include <ATen/ops/_cummax_helper_native.h>
#include <ATen/ops/_cummin_helper_cpu_dispatch.h>
#include <ATen/ops/_cummin_helper_native.h>
#include <ATen/ops/_dirichlet_grad_cpu_dispatch.h>
#include <ATen/ops/_dirichlet_grad_native.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_cpu_dispatch.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_native.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_cpu_dispatch.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_native.h>
#include <ATen/ops/_efficientzerotensor_cpu_dispatch.h>
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/_embedding_bag_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_backward_native.h>
#include <ATen/ops/_embedding_bag_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_dense_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#include <ATen/ops/_empty_affine_quantized_cpu_dispatch.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
#include <ATen/ops/_fft_c2c_cpu_dispatch.h>
#include <ATen/ops/_fft_c2c_native.h>
#include <ATen/ops/_fft_c2r_cpu_dispatch.h>
#include <ATen/ops/_fft_c2r_native.h>
#include <ATen/ops/_fft_r2c_cpu_dispatch.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_foobar_cpu_dispatch.h>
#include <ATen/ops/_foobar_native.h>
#include <ATen/ops/_functional_assert_async_cpu_dispatch.h>
#include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_fused_adagrad_cpu_dispatch.h>
#include <ATen/ops/_fused_adagrad_native.h>
#include <ATen/ops/_fused_adam_cpu_dispatch.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adamw_cpu_dispatch.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_cpu_dispatch.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_sdp_choice_cpu_dispatch.h>
#include <ATen/ops/_fused_sdp_choice_native.h>
#include <ATen/ops/_fused_sgd_cpu_dispatch.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_histogramdd_bin_edges_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_index_put_impl_cpu_dispatch.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_int_mm_cpu_dispatch.h>
#include <ATen/ops/_int_mm_native.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_cpu_dispatch.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_native.h>
#include <ATen/ops/_linalg_det_cpu_dispatch.h>
#include <ATen/ops/_linalg_det_native.h>
#include <ATen/ops/_linalg_eigh_cpu_dispatch.h>
#include <ATen/ops/_linalg_eigh_native.h>
#include <ATen/ops/_linalg_eigvals_cpu_dispatch.h>
#include <ATen/ops/_linalg_eigvals_native.h>
#include <ATen/ops/_linalg_slogdet_cpu_dispatch.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_solve_ex_cpu_dispatch.h>
#include <ATen/ops/_linalg_solve_ex_native.h>
#include <ATen/ops/_linalg_svd_cpu_dispatch.h>
#include <ATen/ops/_linalg_svd_native.h>
#include <ATen/ops/_local_scalar_dense_cpu_dispatch.h>
#include <ATen/ops/_local_scalar_dense_native.h>
#include <ATen/ops/_log_softmax_backward_data_cpu_dispatch.h>
#include <ATen/ops/_log_softmax_backward_data_native.h>
#include <ATen/ops/_log_softmax_cpu_dispatch.h>
#include <ATen/ops/_log_softmax_native.h>
#include <ATen/ops/_logcumsumexp_cpu_dispatch.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_make_dep_token_cpu_dispatch.h>
#include <ATen/ops/_make_dep_token_native.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/_masked_softmax_backward_cpu_dispatch.h>
#include <ATen/ops/_masked_softmax_backward_native.h>
#include <ATen/ops/_masked_softmax_cpu_dispatch.h>
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_native_batch_norm_legit_cpu_dispatch.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_multi_head_attention_cpu_dispatch.h>
#include <ATen/ops/_native_multi_head_attention_native.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_cpu_dispatch.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_native.h>
#include <ATen/ops/_nested_from_padded_cpu_dispatch.h>
#include <ATen/ops/_nested_from_padded_native.h>
#include <ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_cpu_dispatch.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_view_from_buffer_cpu_dispatch.h>
#include <ATen/ops/_nested_view_from_buffer_native.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_cpu_dispatch.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_native.h>
#include <ATen/ops/_pdist_backward_cpu_dispatch.h>
#include <ATen/ops/_pdist_backward_native.h>
#include <ATen/ops/_pdist_forward_cpu_dispatch.h>
#include <ATen/ops/_pdist_forward_native.h>
#include <ATen/ops/_prelu_kernel_backward_cpu_dispatch.h>
#include <ATen/ops/_prelu_kernel_backward_native.h>
#include <ATen/ops/_prelu_kernel_cpu_dispatch.h>
#include <ATen/ops/_prelu_kernel_native.h>
#include <ATen/ops/_reshape_alias_cpu_dispatch.h>
#include <ATen/ops/_reshape_alias_native.h>
#include <ATen/ops/_sample_dirichlet_cpu_dispatch.h>
#include <ATen/ops/_sample_dirichlet_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_cpu_dispatch.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h>
#include <ATen/ops/_segment_reduce_backward_cpu_dispatch.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_slow_conv2d_backward_cpu_dispatch.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_forward_cpu_dispatch.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_softmax_backward_data_cpu_dispatch.h>
#include <ATen/ops/_softmax_backward_data_native.h>
#include <ATen/ops/_softmax_cpu_dispatch.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_spdiags_cpu_dispatch.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_stack_cpu_dispatch.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_standard_gamma_cpu_dispatch.h>
#include <ATen/ops/_standard_gamma_grad_cpu_dispatch.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_standard_gamma_native.h>
#include <ATen/ops/_test_functorch_fallback_cpu_dispatch.h>
#include <ATen/ops/_test_functorch_fallback_native.h>
#include <ATen/ops/_test_optional_filled_intlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_filled_intlist_native.h>
#include <ATen/ops/_test_optional_floatlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_intlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_intlist_native.h>
#include <ATen/ops/_to_sparse_bsc_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsr_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csc_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csr_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h>
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
#include <ATen/ops/_unique2_cpu_dispatch.h>
#include <ATen/ops/_unique2_native.h>
#include <ATen/ops/_unique_cpu_dispatch.h>
#include <ATen/ops/_unique_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_cpu_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h>
#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_native.h>
#include <ATen/ops/_weight_int8pack_mm_cpu_dispatch.h>
#include <ATen/ops/_weight_int8pack_mm_native.h>
#include <ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h>
#include <ATen/ops/_weight_norm_interface_backward_native.h>
#include <ATen/ops/_weight_norm_interface_cpu_dispatch.h>
#include <ATen/ops/_weight_norm_interface_native.h>
#include <ATen/ops/abs_cpu_dispatch.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/acos_cpu_dispatch.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acosh_cpu_dispatch.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_native.h>
#include <ATen/ops/add_cpu_dispatch.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addbmm_cpu_dispatch.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addcdiv_cpu_dispatch.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcmul_cpu_dispatch.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addmm_cpu_dispatch.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmv_cpu_dispatch.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addr_cpu_dispatch.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/all_cpu_dispatch.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/amax_cpu_dispatch.h>
#include <ATen/ops/amax_native.h>
#include <ATen/ops/amin_cpu_dispatch.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/aminmax_cpu_dispatch.h>
#include <ATen/ops/aminmax_native.h>
#include <ATen/ops/angle_cpu_dispatch.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/any_cpu_dispatch.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/arange_cpu_dispatch.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/argmax_cpu_dispatch.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmin_cpu_dispatch.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/as_strided_cpu_dispatch.h>
#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/asin_cpu_dispatch.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh_cpu_dispatch.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan2_cpu_dispatch.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan_cpu_dispatch.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atanh_cpu_dispatch.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/avg_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/avg_pool2d_backward_native.h>
#include <ATen/ops/avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/baddbmm_cpu_dispatch.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/batch_norm_backward_cpu_dispatch.h>
#include <ATen/ops/batch_norm_backward_native.h>
#include <ATen/ops/batch_norm_update_stats_cpu_dispatch.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/bernoulli_cpu_dispatch.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/binary_cross_entropy_backward_cpu_dispatch.h>
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_cpu_dispatch.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/bincount_cpu_dispatch.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/binomial_cpu_dispatch.h>
#include <ATen/ops/binomial_native.h>
#include <ATen/ops/bitwise_and_cpu_dispatch.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_left_shift_cpu_dispatch.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_not_cpu_dispatch.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_or_cpu_dispatch.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_right_shift_cpu_dispatch.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_xor_cpu_dispatch.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bmm_cpu_dispatch.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/bucketize_cpu_dispatch.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/cat_cpu_dispatch.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cauchy_cpu_dispatch.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/ceil_cpu_dispatch.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/channel_shuffle_cpu_dispatch.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/cholesky_cpu_dispatch.h>
#include <ATen/ops/cholesky_inverse_cpu_dispatch.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_native.h>
#include <ATen/ops/clamp_cpu_dispatch.h>
#include <ATen/ops/clamp_max_cpu_dispatch.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_min_cpu_dispatch.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/col2im_cpu_dispatch.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/complex_cpu_dispatch.h>
#include <ATen/ops/complex_native.h>
#include <ATen/ops/conj_physical_cpu_dispatch.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/copysign_cpu_dispatch.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/cos_cpu_dispatch.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cosh_cpu_dispatch.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/count_nonzero_cpu_dispatch.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/cumprod_cpu_dispatch.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumsum_cpu_dispatch.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/dequantize_cpu_dispatch.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/digamma_cpu_dispatch.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/div_cpu_dispatch.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/dot_cpu_dispatch.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/elu_backward_cpu_dispatch.h>
#include <ATen/ops/elu_backward_native.h>
#include <ATen/ops/elu_cpu_dispatch.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/embedding_dense_backward_cpu_dispatch.h>
#include <ATen/ops/embedding_dense_backward_native.h>
#include <ATen/ops/embedding_renorm_cpu_dispatch.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/empty_cpu_dispatch.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_strided_cpu_dispatch.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/eq_cpu_dispatch.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/equal_cpu_dispatch.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/erf_cpu_dispatch.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfc_cpu_dispatch.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfinv_cpu_dispatch.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/exp2_cpu_dispatch.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/exp_cpu_dispatch.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/expm1_cpu_dispatch.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/exponential_cpu_dispatch.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/eye_cpu_dispatch.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cpu_dispatch.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
#include <ATen/ops/fill_cpu_dispatch.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/flip_cpu_dispatch.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/floor_cpu_dispatch.h>
#include <ATen/ops/floor_divide_cpu_dispatch.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/fmax_cpu_dispatch.h>
#include <ATen/ops/fmax_native.h>
#include <ATen/ops/fmin_cpu_dispatch.h>
#include <ATen/ops/fmin_native.h>
#include <ATen/ops/fmod_cpu_dispatch.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/frac_cpu_dispatch.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
#include <ATen/ops/fractional_max_pool3d_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/frexp_cpu_dispatch.h>
#include <ATen/ops/frexp_native.h>
#include <ATen/ops/from_file_cpu_dispatch.h>
#include <ATen/ops/from_file_native.h>
#include <ATen/ops/gather_cpu_dispatch.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gcd_cpu_dispatch.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/ge_cpu_dispatch.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/gelu_backward_cpu_dispatch.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/gelu_cpu_dispatch.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/geometric_cpu_dispatch.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geqrf_cpu_dispatch.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/glu_backward_cpu_dispatch.h>
#include <ATen/ops/glu_backward_jvp_cpu_dispatch.h>
#include <ATen/ops/glu_backward_jvp_native.h>
#include <ATen/ops/glu_backward_native.h>
#include <ATen/ops/glu_cpu_dispatch.h>
#include <ATen/ops/glu_jvp_cpu_dispatch.h>
#include <ATen/ops/glu_jvp_native.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_2d_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_3d_backward_native.h>
#include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_3d_native.h>
#include <ATen/ops/gt_cpu_dispatch.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/hardshrink_backward_cpu_dispatch.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardshrink_cpu_dispatch.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardsigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/hardsigmoid_backward_native.h>
#include <ATen/ops/hardsigmoid_cpu_dispatch.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardswish_backward_cpu_dispatch.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardswish_cpu_dispatch.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardtanh_backward_cpu_dispatch.h>
#include <ATen/ops/hardtanh_backward_native.h>
#include <ATen/ops/hardtanh_cpu_dispatch.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/heaviside_cpu_dispatch.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/histc_cpu_dispatch.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histogram_cpu_dispatch.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/huber_loss_backward_cpu_dispatch.h>
#include <ATen/ops/huber_loss_backward_native.h>
#include <ATen/ops/huber_loss_cpu_dispatch.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/hypot_cpu_dispatch.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/i0_cpu_dispatch.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/igamma_cpu_dispatch.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igammac_cpu_dispatch.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/im2col_cpu_dispatch.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/index_add_cpu_dispatch.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_copy_cpu_dispatch.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_cpu_dispatch.h>
#include <ATen/ops/index_fill_cpu_dispatch.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_reduce_cpu_dispatch.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_select_cpu_dispatch.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/is_set_to_cpu_dispatch.h>
#include <ATen/ops/is_set_to_native.h>
#include <ATen/ops/isin_cpu_dispatch.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isnan_cpu_dispatch.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf_cpu_dispatch.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf_cpu_dispatch.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/kthvalue_cpu_dispatch.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/lcm_cpu_dispatch.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/le_cpu_dispatch.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/leaky_relu_backward_cpu_dispatch.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/leaky_relu_cpu_dispatch.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/lerp_cpu_dispatch.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lgamma_cpu_dispatch.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/linalg_cholesky_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cross_cpu_dispatch.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_eig_cpu_dispatch.h>
#include <ATen/ops/linalg_eig_native.h>
#include <ATen/ops/linalg_eigvals_cpu_dispatch.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_householder_product_cpu_dispatch.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_inv_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
#include <ATen/ops/linalg_ldl_solve_cpu_dispatch.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_lstsq_cpu_dispatch.h>
#include <ATen/ops/linalg_lstsq_native.h>
#include <ATen/ops/linalg_lu_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_native.h>
#include <ATen/ops/linalg_lu_solve_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_matrix_exp_cpu_dispatch.h>
#include <ATen/ops/linalg_matrix_exp_native.h>
#include <ATen/ops/linalg_qr_cpu_dispatch.h>
#include <ATen/ops/linalg_qr_native.h>
#include <ATen/ops/linalg_solve_triangular_cpu_dispatch.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_vector_norm_cpu_dispatch.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linspace_cpu_dispatch.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/log10_cpu_dispatch.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log1p_cpu_dispatch.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log2_cpu_dispatch.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log_cpu_dispatch.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log_normal_cpu_dispatch.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_sigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/log_sigmoid_backward_native.h>
#include <ATen/ops/log_sigmoid_forward_cpu_dispatch.h>
#include <ATen/ops/log_sigmoid_forward_native.h>
#include <ATen/ops/logaddexp2_cpu_dispatch.h>
#include <ATen/ops/logaddexp2_native.h>
#include <ATen/ops/logaddexp_cpu_dispatch.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logical_and_cpu_dispatch.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_not_cpu_dispatch.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_or_cpu_dispatch.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_xor_cpu_dispatch.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logit_backward_cpu_dispatch.h>
#include <ATen/ops/logit_backward_native.h>
#include <ATen/ops/logit_cpu_dispatch.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logspace_cpu_dispatch.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/lshift_cpu_dispatch.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lt_cpu_dispatch.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lu_unpack_cpu_dispatch.h>
#include <ATen/ops/lu_unpack_native.h>
#include <ATen/ops/masked_fill_cpu_dispatch.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_scatter_cpu_dispatch.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_select_cpu_dispatch.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/max_cpu_dispatch.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_cpu_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/max_pool3d_with_indices_cpu_dispatch.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_unpool2d_cpu_dispatch.h>
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_cpu_dispatch.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/maximum_cpu_dispatch.h>
#include <ATen/ops/maximum_native.h>
#include <ATen/ops/mean_cpu_dispatch.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/median_cpu_dispatch.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/min_cpu_dispatch.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/minimum_cpu_dispatch.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/mish_backward_cpu_dispatch.h>
#include <ATen/ops/mish_backward_native.h>
#include <ATen/ops/mish_cpu_dispatch.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_cpu_dispatch.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mkldnn_rnn_layer_cpu_dispatch.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mm_cpu_dispatch.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mode_cpu_dispatch.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mse_loss_backward_cpu_dispatch.h>
#include <ATen/ops/mse_loss_backward_native.h>
#include <ATen/ops/mse_loss_cpu_dispatch.h>
#include <ATen/ops/mse_loss_native.h>
#include <ATen/ops/mul_cpu_dispatch.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/multi_margin_loss_backward_cpu_dispatch.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multi_margin_loss_cpu_dispatch.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_cpu_dispatch.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_forward_cpu_dispatch.h>
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
#include <ATen/ops/multinomial_cpu_dispatch.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/mvlgamma_cpu_dispatch.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/nan_to_num_cpu_dispatch.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nanmedian_cpu_dispatch.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nansum_cpu_dispatch.h>
#include <ATen/ops/nansum_native.h>
#include <ATen/ops/narrow_copy_cpu_dispatch.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/native_batch_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_cpu_dispatch.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_channel_shuffle_cpu_dispatch.h>
#include <ATen/ops/native_channel_shuffle_native.h>
#include <ATen/ops/native_dropout_backward_cpu_dispatch.h>
#include <ATen/ops/native_dropout_backward_native.h>
#include <ATen/ops/native_dropout_cpu_dispatch.h>
#include <ATen/ops/native_dropout_native.h>
#include <ATen/ops/native_group_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_group_norm_cpu_dispatch.h>
#include <ATen/ops/native_group_norm_native.h>
#include <ATen/ops/native_layer_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_cpu_dispatch.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/ne_cpu_dispatch.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/neg_cpu_dispatch.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/nextafter_cpu_dispatch.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nll_loss2d_backward_cpu_dispatch.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_forward_cpu_dispatch.h>
#include <ATen/ops/nll_loss2d_forward_native.h>
#include <ATen/ops/nll_loss_backward_cpu_dispatch.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_cpu_dispatch.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nonzero_cpu_dispatch.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_static_cpu_dispatch.h>
#include <ATen/ops/nonzero_static_native.h>
#include <ATen/ops/norm_cpu_dispatch.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/normal_cpu_dispatch.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/ormqr_cpu_dispatch.h>
#include <ATen/ops/ormqr_native.h>
#include <ATen/ops/pixel_shuffle_cpu_dispatch.h>
#include <ATen/ops/pixel_shuffle_native.h>
#include <ATen/ops/pixel_unshuffle_cpu_dispatch.h>
#include <ATen/ops/pixel_unshuffle_native.h>
#include <ATen/ops/poisson_cpu_dispatch.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/polar_cpu_dispatch.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polygamma_cpu_dispatch.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/pow_cpu_dispatch.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/prod_cpu_dispatch.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/put_cpu_dispatch.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/quantize_per_channel_cpu_dispatch.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/quantize_per_tensor_cpu_dispatch.h>
#include <ATen/ops/quantize_per_tensor_dynamic_cpu_dispatch.h>
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/random_cpu_dispatch.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/randperm_cpu_dispatch.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/range_cpu_dispatch.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/reciprocal_cpu_dispatch.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reflection_pad1d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad2d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad2d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad3d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/relu_cpu_dispatch.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/remainder_cpu_dispatch.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/renorm_cpu_dispatch.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/repeat_interleave_cpu_dispatch.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/replication_pad1d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_cpu_dispatch.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad2d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_cpu_dispatch.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad3d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_cpu_dispatch.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/resize_cpu_dispatch.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/roll_cpu_dispatch.h>
#include <ATen/ops/roll_native.h>
#include <ATen/ops/round_cpu_dispatch.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/rrelu_with_noise_cpu_dispatch.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rshift_cpu_dispatch.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rsqrt_cpu_dispatch.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsub_cpu_dispatch.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/scatter_add_cpu_dispatch.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_cpu_dispatch.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_reduce_cpu_dispatch.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/searchsorted_cpu_dispatch.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/segment_reduce_cpu_dispatch.h>
#include <ATen/ops/segment_reduce_native.h>
#include <ATen/ops/set_cpu_dispatch.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/sgn_cpu_dispatch.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/sigmoid_backward_native.h>
#include <ATen/ops/sigmoid_cpu_dispatch.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sign_cpu_dispatch.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/signbit_cpu_dispatch.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/silu_backward_cpu_dispatch.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/silu_cpu_dispatch.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/sin_cpu_dispatch.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sinc_cpu_dispatch.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinh_cpu_dispatch.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/slow_conv3d_forward_cpu_dispatch.h>
#include <ATen/ops/slow_conv3d_forward_native.h>
#include <ATen/ops/slow_conv_dilated2d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_dilated2d_native.h>
#include <ATen/ops/slow_conv_dilated3d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_dilated3d_native.h>
#include <ATen/ops/slow_conv_transpose2d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#include <ATen/ops/slow_conv_transpose3d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#include <ATen/ops/smooth_l1_loss_backward_cpu_dispatch.h>
#include <ATen/ops/smooth_l1_loss_backward_native.h>
#include <ATen/ops/smooth_l1_loss_cpu_dispatch.h>
#include <ATen/ops/smooth_l1_loss_native.h>
#include <ATen/ops/softplus_backward_cpu_dispatch.h>
#include <ATen/ops/softplus_backward_native.h>
#include <ATen/ops/softplus_cpu_dispatch.h>
#include <ATen/ops/softplus_native.h>
#include <ATen/ops/softshrink_backward_cpu_dispatch.h>
#include <ATen/ops/softshrink_backward_native.h>
#include <ATen/ops/softshrink_cpu_dispatch.h>
#include <ATen/ops/softshrink_native.h>
#include <ATen/ops/sort_cpu_dispatch.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/special_airy_ai_cpu_dispatch.h>
#include <ATen/ops/special_airy_ai_native.h>
#include <ATen/ops/special_bessel_j0_cpu_dispatch.h>
#include <ATen/ops/special_bessel_j0_native.h>
#include <ATen/ops/special_bessel_j1_cpu_dispatch.h>
#include <ATen/ops/special_bessel_j1_native.h>
#include <ATen/ops/special_bessel_y0_cpu_dispatch.h>
#include <ATen/ops/special_bessel_y0_native.h>
#include <ATen/ops/special_bessel_y1_cpu_dispatch.h>
#include <ATen/ops/special_bessel_y1_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_entr_cpu_dispatch.h>
#include <ATen/ops/special_entr_native.h>
#include <ATen/ops/special_erfcx_cpu_dispatch.h>
#include <ATen/ops/special_erfcx_native.h>
#include <ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_he_cpu_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_i0e_cpu_dispatch.h>
#include <ATen/ops/special_i0e_native.h>
#include <ATen/ops/special_i1_cpu_dispatch.h>
#include <ATen/ops/special_i1_native.h>
#include <ATen/ops/special_i1e_cpu_dispatch.h>
#include <ATen/ops/special_i1e_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_cpu_dispatch.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_log_ndtr_cpu_dispatch.h>
#include <ATen/ops/special_log_ndtr_native.h>
#include <ATen/ops/special_modified_bessel_i0_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_i0_native.h>
#include <ATen/ops/special_modified_bessel_i1_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_i1_native.h>
#include <ATen/ops/special_modified_bessel_k0_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_k0_native.h>
#include <ATen/ops/special_modified_bessel_k1_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_k1_native.h>
#include <ATen/ops/special_ndtri_cpu_dispatch.h>
#include <ATen/ops/special_ndtri_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_cpu_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_spherical_bessel_j0_cpu_dispatch.h>
#include <ATen/ops/special_spherical_bessel_j0_native.h>
#include <ATen/ops/special_xlog1py_cpu_dispatch.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_zeta_cpu_dispatch.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/sqrt_cpu_dispatch.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sspaddmm_cpu_dispatch.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/std_cpu_dispatch.h>
#include <ATen/ops/std_mean_cpu_dispatch.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/sub_cpu_dispatch.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sum_cpu_dispatch.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/take_cpu_dispatch.h>
#include <ATen/ops/take_native.h>
#include <ATen/ops/tan_cpu_dispatch.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tanh_backward_cpu_dispatch.h>
#include <ATen/ops/tanh_backward_native.h>
#include <ATen/ops/tanh_cpu_dispatch.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_backward_cpu_dispatch.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/threshold_cpu_dispatch.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/to_mkldnn_cpu_dispatch.h>
#include <ATen/ops/to_mkldnn_native.h>
#include <ATen/ops/topk_cpu_dispatch.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/trace_cpu_dispatch.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/triangular_solve_cpu_dispatch.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/tril_cpu_dispatch.h>
#include <ATen/ops/tril_indices_cpu_dispatch.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_cpu_dispatch.h>
#include <ATen/ops/triu_indices_cpu_dispatch.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/trunc_cpu_dispatch.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/unfold_backward_cpu_dispatch.h>
#include <ATen/ops/unfold_backward_native.h>
#include <ATen/ops/unfold_cpu_dispatch.h>
#include <ATen/ops/unfold_native.h>
#include <ATen/ops/uniform_cpu_dispatch.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/unique_consecutive_cpu_dispatch.h>
#include <ATen/ops/unique_consecutive_native.h>
#include <ATen/ops/unique_dim_consecutive_cpu_dispatch.h>
#include <ATen/ops/unique_dim_consecutive_native.h>
#include <ATen/ops/unique_dim_cpu_dispatch.h>
#include <ATen/ops/unique_dim_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#include <ATen/ops/upsample_bicubic2d_cpu_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bilinear2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
#include <ATen/ops/upsample_bilinear2d_cpu_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_linear1d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_linear1d_backward_native.h>
#include <ATen/ops/upsample_linear1d_cpu_dispatch.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest1d_backward_native.h>
#include <ATen/ops/upsample_nearest1d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest2d_backward_native.h>
#include <ATen/ops/upsample_nearest2d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/upsample_nearest3d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
#include <ATen/ops/upsample_trilinear3d_cpu_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/var_cpu_dispatch.h>
#include <ATen/ops/var_mean_cpu_dispatch.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/vdot_cpu_dispatch.h>
#include <ATen/ops/vdot_native.h>
#include <ATen/ops/view_as_complex_cpu_dispatch.h>
#include <ATen/ops/view_as_complex_native.h>
#include <ATen/ops/view_as_real_cpu_dispatch.h>
#include <ATen/ops/view_as_real_native.h>
#include <ATen/ops/view_cpu_dispatch.h>
#include <ATen/ops/view_native.h>
#include <ATen/ops/where_cpu_dispatch.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/xlogy_cpu_dispatch.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/zero_cpu_dispatch.h>
#include <ATen/ops/zero_native.h>

namespace at {
namespace {
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-function")

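// Out-of-line helpers shared by the wrappers below: create_out allocates a
// fresh output tensor, resize_out reshapes a user-supplied `out=` tensor,
// check_inplace validates in-place calls, and maybe_create_proxy supplies a
// correctly-strided temporary when `out` cannot simply be restrided.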
Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (strides.empty()) {
    return at::detail::empty_cpu(sizes, options);
  } else {
    return at::detail::empty_strided_cpu(sizes, strides, options);
  }
}

void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}

void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  //   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  //   2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}

std::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (out.strides() != strides) {
    return at::detail::empty_strided_cpu(sizes, strides, options);
  }
  return std::nullopt;
}
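
// A note on the proxy pattern above: resize_out only restrides `out` when an
// actual resize occurs, so a preallocated `out` may keep strides the kernel
// cannot write to directly. In that case the kernel writes into this proxy
// tensor instead, and the generated out-variant wrapper is expected to
// copy_() the proxy back into `out` afterwards.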
C10_DIAGNOSTIC_POP()
} // namespace
} // namespace at

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// conflicts with identifiers that may already be defined in the at
// namespace.
namespace {
namespace {
void wrapper_CPU___assert_async(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_assert_async_cpu(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_assert_async",
TORCH_FN(wrapper_CPU___assert_async));
}
} // anonymous namespace
namespace cpu {
void _assert_async(const at::Tensor & self) {
  return wrapper_CPU___assert_async(self);
}
} // namespace cpu
} // namespace at
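
// The at::cpu:: functions defined in this file call the registered CPU
// kernels directly, bypassing dispatch. Illustrative use, assuming a CPU
// tensor:
//   at::Tensor t = at::ones({2, 2});
//   at::cpu::_assert_async(t);  // same kernel at::_assert_async(t) would
//                               // reach via the dispatcher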
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// conflicts with identifiers that may already be defined in the at
// namespace.
namespace {
namespace {
void wrapper_CPU_msg__assert_async(const at::Tensor & self, c10::string_view assert_msg) {
  // No device check
  // DeviceGuard omitted
  return at::native::_assert_async_msg_cpu(self, assert_msg);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_assert_async.msg",
TORCH_FN(wrapper_CPU_msg__assert_async));
}
} // anonymous namespace
namespace cpu {
void _assert_async(const at::Tensor & self, c10::string_view assert_msg) {
  return wrapper_CPU_msg__assert_async(self, assert_msg);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// conflicts with identifiers that may already be defined in the at
// namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_abs_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::abs_out(self, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("abs.out",
TORCH_FN(wrapper_CPU_out_abs_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_CPU_out_abs_out(self, out);
}
at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_CPU_out_abs_out(self, out);
}
} // namespace cpu
} // namespace at
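
// The two spellings above are generated for every out variant: abs_out takes
// `out` first, matching the C++ method-style at::abs_out convention, while
// abs_outf takes `out` last in schema order; both forward to the same
// wrapper.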
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// conflicts with identifiers that may already be defined in the at
// namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__view_as_real(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::view_as_real(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("view_as_real",
TORCH_FN(wrapper_CPU__view_as_real));
}
} // anonymous namespace
namespace cpu {
at::Tensor view_as_real(const at::Tensor & self) {
  return wrapper_CPU__view_as_real(self);
}
} // namespace cpu
} // namespace at
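// NOTE: view_as_real reinterprets a complex tensor as a real one with a
// trailing dimension of size 2 holding (real, imag); it is a view, so no
// data is copied. Hypothetical caller:
//   auto z = at::zeros({4}, at::kComplexFloat);
//   auto r = at::cpu::view_as_real(z);  // sizes {4, 2}, shares storage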
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_acos_out_functional final : public at::native::structured_acos_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_acos(const at::Tensor & self) {
structured_acos_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
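// NOTE: the functional wrapper above shows the standard structured-kernel
// protocol: meta() performs shape/dtype inference and allocates
// outputs_[0] through set_output_*, impl() runs the actual CPU kernel
// against that buffer, and the fresh tensor is moved out to the caller.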
struct structured_acos_out_out final : public at::native::structured_acos_out {
    structured_acos_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_acos_out_out(const at::Tensor & self, at::Tensor & out) {
structured_acos_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
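// NOTE: in the out= wrapper above, impl() writes into maybe_get_output(0),
// which resolves to the proxy when one was created (user-supplied out with
// mismatched strides) and to the user's out otherwise; the trailing copy_
// reconciles the proxy back into out.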
struct structured_acos_out_inplace final : public at::native::structured_acos_out {
    structured_acos_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_acos_(at::Tensor & self) {
structured_acos_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
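// NOTE: the in-place wrapper reuses the same machinery with self serving
// as the single output; check_inplace (instead of resize_out) asserts that
// self already matches the metadata computed by meta().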
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("acos", TORCH_FN(wrapper_CPU_acos));
m.impl("acos.out", TORCH_FN(wrapper_CPU_acos_out_out));
m.impl("acos_", TORCH_FN(wrapper_CPU_acos_));
}
} // anonymous namespace
namespace cpu {
at::Tensor acos(const at::Tensor & self) {
return wrapper_CPU_acos(self);
}
at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_acos_out_out(self, out);
}
at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_acos_out_out(self, out);
}
at::Tensor & acos_(at::Tensor & self) {
return wrapper_CPU_acos_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU_start_out_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::arange_out(start, end, step, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("arange.start_out",
TORCH_FN(wrapper_CPU_start_out_arange_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & arange_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
return wrapper_CPU_start_out_arange_out(start, end, step, out);
}
at::Tensor & arange_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
return wrapper_CPU_start_out_arange_out(start, end, step, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_atanh_out_functional final : public at::native::structured_atanh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_atanh(const at::Tensor & self) {
structured_atanh_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_atanh_out_out final : public at::native::structured_atanh_out {
    structured_atanh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_atanh_out_out(const at::Tensor & self, at::Tensor & out) {
structured_atanh_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_atanh_out_inplace final : public at::native::structured_atanh_out {
    structured_atanh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_atanh_(at::Tensor & self) {
structured_atanh_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("atanh", TORCH_FN(wrapper_CPU_atanh));
m.impl("atanh.out", TORCH_FN(wrapper_CPU_atanh_out_out));
m.impl("atanh_", TORCH_FN(wrapper_CPU_atanh_));
}
} // anonymous namespace
namespace cpu {
at::Tensor atanh(const at::Tensor & self) {
return wrapper_CPU_atanh(self);
}
at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_atanh_out_out(self, out);
}
at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_atanh_out_out(self, out);
}
at::Tensor & atanh_(at::Tensor & self) {
return wrapper_CPU_atanh_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_asin_out_functional final : public at::native::structured_asin_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_asin(const at::Tensor & self) {
structured_asin_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_asin_out_out final : public at::native::structured_asin_out {
    structured_asin_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_asin_out_out(const at::Tensor & self, at::Tensor & out) {
structured_asin_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_asin_out_inplace final : public at::native::structured_asin_out {
    structured_asin_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_asin_(at::Tensor & self) {
structured_asin_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("asin", TORCH_FN(wrapper_CPU_asin));
m.impl("asin.out", TORCH_FN(wrapper_CPU_asin_out_out));
m.impl("asin_", TORCH_FN(wrapper_CPU_asin_));
}
} // anonymous namespace
namespace cpu {
at::Tensor asin(const at::Tensor & self) {
return wrapper_CPU_asin(self);
}
at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_asin_out_out(self, out);
}
at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_asin_out_out(self, out);
}
at::Tensor & asin_(at::Tensor & self) {
return wrapper_CPU_asin_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // No device check
  // DeviceGuard omitted
  return at::native::binary_cross_entropy_backward_cpu(grad_output, self, target, weight, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_binary_cross_entropy_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::binary_cross_entropy_backward_out_cpu(grad_output, self, target, weight, reduction, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("binary_cross_entropy_backward",
TORCH_FN(wrapper_CPU__binary_cross_entropy_backward));
m.impl("binary_cross_entropy_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_binary_cross_entropy_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU__binary_cross_entropy_backward(grad_output, self, target, weight, reduction);
}
at::Tensor & binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU_grad_input_binary_cross_entropy_backward_out(grad_output, self, target, weight, reduction, grad_input);
}
at::Tensor & binary_cross_entropy_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_binary_cross_entropy_backward_out(grad_output, self, target, weight, reduction, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_logical_not_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::logical_not_out(self, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("logical_not.out",
TORCH_FN(wrapper_CPU_out_logical_not_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_logical_not_out(self, out);
}
at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_logical_not_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_logical_and_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::logical_and_out(self, other, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("logical_and.out",
TORCH_FN(wrapper_CPU_out_logical_and_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_out_logical_and_out(self, other, out);
}
at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_out_logical_and_out(self, other, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_cosh_out_functional final : public at::native::structured_cosh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_cosh(const at::Tensor & self) {
structured_cosh_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_cosh_out_out final : public at::native::structured_cosh_out {
    structured_cosh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cosh_out_out(const at::Tensor & self, at::Tensor & out) {
structured_cosh_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_cosh_out_inplace final : public at::native::structured_cosh_out {
    structured_cosh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cosh_(at::Tensor & self) {
structured_cosh_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("cosh", TORCH_FN(wrapper_CPU_cosh));
m.impl("cosh.out", TORCH_FN(wrapper_CPU_cosh_out_out));
m.impl("cosh_", TORCH_FN(wrapper_CPU_cosh_));
}
} // anonymous namespace
namespace cpu {
at::Tensor cosh(const at::Tensor & self) {
return wrapper_CPU_cosh(self);
}
at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_cosh_out_out(self, out);
}
at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_cosh_out_out(self, out);
}
at::Tensor & cosh_(at::Tensor & self) {
return wrapper_CPU_cosh_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_cumprod_out_functional final : public at::native::structured_cumprod_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: cumprod's meta class does not inherit from
        // TensorIteratorBase, so there is no base set_output_raw_strided
        // to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: cumprod's meta class does not inherit from
        // TensorIteratorBase, so there is no base set_output_raw_strided
        // to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_cumprod(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
structured_cumprod_out_functional op;
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_cumprod_out_out final : public at::native::structured_cumprod_out {
    structured_cumprod_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: cumprod's meta class does not inherit from
        // TensorIteratorBase, so there is no base set_output_raw_strided
        // to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: cumprod's meta class does not inherit from
        // TensorIteratorBase, so there is no base set_output_raw_strided
        // to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cumprod_out_out(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
structured_cumprod_out_out op(out);
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_cumprod_out_inplace final : public at::native::structured_cumprod_out {
    structured_cumprod_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: cumprod's meta class does not inherit from
        // TensorIteratorBase, so there is no base set_output_raw_strided
        // to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: cumprod's meta class does not inherit from
        // TensorIteratorBase, so there is no base set_output_raw_strided
        // to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cumprod_(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
structured_cumprod_out_inplace op(self);
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("cumprod", TORCH_FN(wrapper_CPU_cumprod));
m.impl("cumprod.out", TORCH_FN(wrapper_CPU_cumprod_out_out));
m.impl("cumprod_", TORCH_FN(wrapper_CPU_cumprod_));
}
} // anonymous namespace
namespace cpu {
at::Tensor cumprod(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_cumprod(self, dim, dtype);
}
at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_cumprod_out_out(self, dim, dtype, out);
}
at::Tensor & cumprod_outf(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CPU_cumprod_out_out(self, dim, dtype, out);
}
at::Tensor & cumprod_(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_cumprod_(self, dim, dtype);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_div_out_functional final : public at::native::structured_div_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_div_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_div_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_div_out_out final : public at::native::structured_div_out {
    structured_div_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_div_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_div_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_div_out_inplace final : public at::native::structured_div_out {
    structured_div_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_div__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_div_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("div.Tensor", TORCH_FN(wrapper_CPU_div_Tensor));
m.impl("div.out", TORCH_FN(wrapper_CPU_div_out_out));
m.impl("div_.Tensor", TORCH_FN(wrapper_CPU_div__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor div(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_div_Tensor(self, other);
}
at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_div_out_out(self, other, out);
}
at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_div_out_out(self, other, out);
}
at::Tensor & div_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_div__Tensor(self, other);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_div_out_mode_functional final : public at::native::structured_div_out_mode {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_div_Tensor_mode(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
structured_div_out_mode_functional op;
op.meta(self, other, rounding_mode);
op.impl(self, other, rounding_mode, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_div_out_mode_out final : public at::native::structured_div_out_mode {
    structured_div_out_mode_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_div_out_out_mode(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
structured_div_out_mode_out op(out);
op.meta(self, other, rounding_mode);
op.impl(self, other, rounding_mode, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_div_out_mode_inplace final : public at::native::structured_div_out_mode {
    structured_div_out_mode_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_div__Tensor_mode(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
structured_div_out_mode_inplace op(self);
op.meta(self, other, rounding_mode);
op.impl(self, other, rounding_mode, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("div.Tensor_mode", TORCH_FN(wrapper_CPU_div_Tensor_mode));
m.impl("div.out_mode", TORCH_FN(wrapper_CPU_div_out_out_mode));
m.impl("div_.Tensor_mode", TORCH_FN(wrapper_CPU_div__Tensor_mode));
}
} // anonymous namespace
namespace cpu {
at::Tensor div(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
return wrapper_CPU_div_Tensor_mode(self, other, rounding_mode);
}
at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
return wrapper_CPU_div_out_out_mode(self, other, rounding_mode, out);
}
at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
return wrapper_CPU_div_out_out_mode(self, other, rounding_mode, out);
}
at::Tensor & div_(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
return wrapper_CPU_div__Tensor_mode(self, other, rounding_mode);
}
} // namespace cpu
} // namespace at
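// NOTE (assumption, per the upstream div schema): rounding_mode accepts
// std::nullopt (true division), "trunc" (round toward zero), and "floor"
// (round toward negative infinity). Hypothetical caller:
//   auto q = at::cpu::div(a, b, "floor");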
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
    // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_cpu(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_embedding_bag",
TORCH_FN(wrapper_CPU___embedding_bag));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
return wrapper_CPU___embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}
} // namespace cpu
} // namespace at
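// NOTE (assumption, based on the upstream ATen schema): the four tensors
// returned by _embedding_bag are the pooled output plus the bookkeeping
// tensors (offset2bag, bag_size, max_indices) that its backward consumes.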
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
    // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_per_sample_weights_backward_cpu(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_embedding_bag_per_sample_weights_backward",
TORCH_FN(wrapper_CPU___embedding_bag_per_sample_weights_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
return wrapper_CPU___embedding_bag_per_sample_weights_backward(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // No device check
  // DeviceGuard omitted
  return at::native::empty_strided_cpu(C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), dtype, layout, device, pin_memory);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("empty_strided",
TORCH_FN(wrapper_CPU__empty_strided));
}
} // anonymous namespace
namespace cpu {
at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) {
return wrapper_CPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
return wrapper_CPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
}
at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) {
return wrapper_CPU__empty_strided(size, stride, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
return wrapper_CPU__empty_strided(size, stride, dtype, layout, device, pin_memory);
}
} // namespace cpu
} // namespace at
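// Illustrative only: empty_strided is exposed in at::cpu:: both with a packed
// at::TensorOptions and with the unpacked optional components, plus _symint
// variants for symbolic shapes. A minimal sketch, assuming a standard ATen
// build; strides {3, 1} give a contiguous 2x3 tensor, {1, 2} a column-major one:
//
//   at::Tensor a = at::cpu::empty_strided({2, 3}, {3, 1},
//       at::TensorOptions().dtype(at::kFloat));
//   at::Tensor b = at::cpu::empty_strided({2, 3}, {1, 2},
//       at::kFloat, std::nullopt, std::nullopt, std::nullopt);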
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_exp_out_functional final : public at::native::structured_exp_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_exp(const at::Tensor & self) {
structured_exp_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_exp_out_out final : public at::native::structured_exp_out {
    structured_exp_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_exp_out_out(const at::Tensor & self, at::Tensor & out) {
structured_exp_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_exp_out_inplace final : public at::native::structured_exp_out {
    structured_exp_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_exp_(at::Tensor & self) {
structured_exp_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("exp", TORCH_FN(wrapper_CPU_exp));
m.impl("exp.out", TORCH_FN(wrapper_CPU_exp_out_out));
m.impl("exp_", TORCH_FN(wrapper_CPU_exp_));
}
} // anonymous namespace
namespace cpu {
at::Tensor exp(const at::Tensor & self) {
return wrapper_CPU_exp(self);
}
at::Tensor & exp_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_exp_out_out(self, out);
}
at::Tensor & exp_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_exp_out_out(self, out);
}
at::Tensor & exp_(at::Tensor & self) {
return wrapper_CPU_exp_(self);
}
} // namespace cpu
} // namespace at
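// Illustrative only: the three structured variants registered above share one
// meta/impl pair and differ only in how the output tensor is obtained. A
// minimal sketch, assuming a standard ATen build:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor y = at::cpu::exp(x);      // functional: create_out allocates
//   at::Tensor out = at::empty_like(x);
//   at::cpu::exp_outf(x, out);           // out=: resize_out reuses `out`
//   at::cpu::exp_(x);                    // in-place: check_inplace validates `x`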
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_expm1_out_functional final : public at::native::structured_expm1_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_expm1(const at::Tensor & self) {
structured_expm1_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_expm1_out_out final : public at::native::structured_expm1_out {
    structured_expm1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_expm1_out_out(const at::Tensor & self, at::Tensor & out) {
structured_expm1_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_expm1_out_inplace final : public at::native::structured_expm1_out {
    structured_expm1_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_expm1_(at::Tensor & self) {
structured_expm1_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("expm1", TORCH_FN(wrapper_CPU_expm1));
m.impl("expm1.out", TORCH_FN(wrapper_CPU_expm1_out_out));
m.impl("expm1_", TORCH_FN(wrapper_CPU_expm1_));
}
} // anonymous namespace
namespace cpu {
at::Tensor expm1(const at::Tensor & self) {
return wrapper_CPU_expm1(self);
}
at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_expm1_out_out(self, out);
}
at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_expm1_out_out(self, out);
}
at::Tensor & expm1_(at::Tensor & self) {
return wrapper_CPU_expm1_(self);
}
} // namespace cpu
} // namespace at
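// Illustrative only: expm1 follows the same structured pattern as exp; its
// point is numerical, computing exp(x) - 1 accurately for x near zero. A
// minimal sketch, assuming a standard ATen build:
//
//   at::Tensor tiny = at::full({1}, 1e-12);
//   at::cpu::expm1(tiny);   // ~1e-12; naive exp(tiny) - 1 would cancel to 0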
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_Scalar_fill_(at::Tensor & self, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_(self, value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fill_.Scalar",
TORCH_FN(wrapper_CPU_Scalar_fill_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
return wrapper_CPU_Scalar_fill_(self, value);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_Tensor_fill_(at::Tensor & self, const at::Tensor & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_(self, value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fill_.Tensor",
TORCH_FN(wrapper_CPU_Tensor_fill_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & fill_(at::Tensor & self, const at::Tensor & value) {
return wrapper_CPU_Tensor_fill_(self, value);
}
} // namespace cpu
} // namespace at
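// Illustrative only: fill_ is registered once per overload name in the schema
// ("fill_.Scalar" above and "fill_.Tensor" here; the Tensor form expects a
// 0-dim value). A minimal sketch, assuming a standard ATen build:
//
//   at::Tensor t = at::zeros({2, 2});
//   at::cpu::fill_(t, 3.5);                    // fill_.Scalar
//   at::cpu::fill_(t, at::scalar_tensor(7.));  // fill_.Tensor, 0-dim value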
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_floor_out_functional final : public at::native::structured_floor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_floor(const at::Tensor & self) {
structured_floor_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_floor_out_out final : public at::native::structured_floor_out {
    structured_floor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_floor_out_out(const at::Tensor & self, at::Tensor & out) {
structured_floor_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_floor_out_inplace final : public at::native::structured_floor_out {
    structured_floor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_floor_(at::Tensor & self) {
structured_floor_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("floor", TORCH_FN(wrapper_CPU_floor));
m.impl("floor.out", TORCH_FN(wrapper_CPU_floor_out_out));
m.impl("floor_", TORCH_FN(wrapper_CPU_floor_));
}
} // anonymous namespace
namespace cpu {
at::Tensor floor(const at::Tensor & self) {
return wrapper_CPU_floor(self);
}
at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_floor_out_out(self, out);
}
at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_floor_out_out(self, out);
}
at::Tensor & floor_(at::Tensor & self) {
return wrapper_CPU_floor_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__floor_divide(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_divide_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Tensor_floor_divide_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_divide_(self, other);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("floor_divide",
TORCH_FN(wrapper_CPU__floor_divide));
m.impl("floor_divide.out",
TORCH_FN(wrapper_CPU_out_floor_divide_out));
m.impl("floor_divide_.Tensor",
TORCH_FN(wrapper_CPU_Tensor_floor_divide_));
}
} // anonymous namespace
namespace cpu {
at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU__floor_divide(self, other);
}
at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_out_floor_divide_out(self, other, out);
}
at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_out_floor_divide_out(self, other, out);
}
at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_Tensor_floor_divide_(self, other);
}
} // namespace cpu
} // namespace at
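// Illustrative only: floor_divide rounds toward negative infinity, unlike
// C-style truncating division. A minimal sketch, assuming a standard ATen
// build:
//
//   at::Tensor a = at::tensor({5, -5}, at::kLong);
//   at::Tensor b = at::tensor({2,  2}, at::kLong);
//   at::cpu::floor_divide(a, b);   // {2, -3}: floor(-2.5) == -3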
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__from_file(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::from_file(filename, shared, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("from_file",
TORCH_FN(wrapper_CPU__from_file));
}
} // anonymous namespace
namespace cpu {
at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::TensorOptions options) {
return wrapper_CPU__from_file(filename, shared, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
return wrapper_CPU__from_file(filename, shared, size, dtype, layout, device, pin_memory);
}
} // namespace cpu
} // namespace at
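// Illustrative only: from_file maps a file's bytes into a CPU tensor of
// `size` elements; with shared=true the mapping is writable and shared with
// the file. A minimal sketch with a hypothetical path, assuming the file
// holds at least 1024 bytes:
//
//   at::Tensor t = at::cpu::from_file("/tmp/data.bin", /*shared=*/true,
//       /*size=*/1024, at::TensorOptions().dtype(at::kByte));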
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::grid_sampler_2d_cpu(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("grid_sampler_2d",
TORCH_FN(wrapper_CPU__grid_sampler_2d));
}
} // anonymous namespace
namespace cpu {
at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
return wrapper_CPU__grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::grid_sampler_3d_cpu(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("grid_sampler_3d",
TORCH_FN(wrapper_CPU__grid_sampler_3d));
}
} // anonymous namespace
namespace cpu {
at::Tensor grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
return wrapper_CPU__grid_sampler_3d(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_group_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::native_group_norm(input, weight, bias, N.guard_int(__FILE__, __LINE__), C.guard_int(__FILE__, __LINE__), HxW.guard_int(__FILE__, __LINE__), group, eps);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("native_group_norm",
TORCH_FN(wrapper_CPU__native_group_norm));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
return wrapper_CPU__native_group_norm(input, weight, bias, N, C, HxW, group, eps);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_symint(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
return wrapper_CPU__native_group_norm(input, weight, bias, N, C, HxW, group, eps);
}
} // namespace cpu
} // namespace at
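// Illustrative only: the SymInt wrapper above calls guard_int to force each
// possibly-symbolic dimension down to a concrete int64_t before reaching the
// kernel (tracing systems record the guard; concrete values pass through).
// A minimal sketch:
//
//   c10::SymInt n = 8;                             // already concrete
//   int64_t cn = n.guard_int(__FILE__, __LINE__);  // cn == 8, no guard needed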
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___fft_c2r(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fft_c2r_mkl(self, dim, normalization, last_dim_size.guard_int(__FILE__, __LINE__));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out__fft_c2r_out(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fft_c2r_mkl_out(self, dim, normalization, last_dim_size.guard_int(__FILE__, __LINE__), out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fft_c2r",
TORCH_FN(wrapper_CPU___fft_c2r));
m.impl("_fft_c2r.out",
TORCH_FN(wrapper_CPU_out__fft_c2r_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _fft_c2r(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
return wrapper_CPU___fft_c2r(self, dim, normalization, last_dim_size);
}
at::Tensor _fft_c2r_symint(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) {
return wrapper_CPU___fft_c2r(self, dim, normalization, last_dim_size);
}
at::Tensor & _fft_c2r_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
return wrapper_CPU_out__fft_c2r_out(self, dim, normalization, last_dim_size, out);
}
at::Tensor & _fft_c2r_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {
return wrapper_CPU_out__fft_c2r_out(self, dim, normalization, last_dim_size, out);
}
at::Tensor & _fft_c2r_symint_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) {
return wrapper_CPU_out__fft_c2r_out(self, dim, normalization, last_dim_size, out);
}
at::Tensor & _fft_c2r_symint_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, at::Tensor & out) {
return wrapper_CPU_out__fft_c2r_out(self, dim, normalization, last_dim_size, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
void wrapper_CPU___validate_compressed_sparse_indices(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
  // No device check
  // DeviceGuard omitted
  return at::native::_validate_compressed_sparse_indices_cpu(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_validate_compressed_sparse_indices",
TORCH_FN(wrapper_CPU___validate_compressed_sparse_indices));
}
} // anonymous namespace
namespace cpu {
void _validate_compressed_sparse_indices(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
return wrapper_CPU___validate_compressed_sparse_indices(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
}
} // namespace cpu
} // namespace at
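// Illustrative only: the validator returns void and throws on malformed
// inputs. A minimal sketch for a 2x2 CSR layout with nnz == 2, assuming a
// standard ATen build:
//
//   at::Tensor crow = at::tensor({0, 1, 2}, at::kLong); // len == cdim + 1
//   at::Tensor col  = at::tensor({0, 1},    at::kLong); // len == nnz
//   at::cpu::_validate_compressed_sparse_indices(
//       /*is_crow=*/true, crow, col, /*cdim=*/2, /*dim=*/2, /*nnz=*/2);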
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__isnan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::isnan(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("isnan",
TORCH_FN(wrapper_CPU__isnan));
}
} // anonymous namespace
namespace cpu {
at::Tensor isnan(const at::Tensor & self) {
return wrapper_CPU__isnan(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_log2_out_functional final : public at::native::structured_log2_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_log2(const at::Tensor & self) {
structured_log2_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_log2_out_out final : public at::native::structured_log2_out {
    structured_log2_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log2_out_out(const at::Tensor & self, at::Tensor & out) {
structured_log2_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_log2_out_inplace final : public at::native::structured_log2_out {
    structured_log2_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log2_(at::Tensor & self) {
structured_log2_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("log2", TORCH_FN(wrapper_CPU_log2));
m.impl("log2.out", TORCH_FN(wrapper_CPU_log2_out_out));
m.impl("log2_", TORCH_FN(wrapper_CPU_log2_));
}
} // anonymous namespace
namespace cpu {
at::Tensor log2(const at::Tensor & self) {
return wrapper_CPU_log2(self);
}
at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_log2_out_out(self, out);
}
at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_log2_out_out(self, out);
}
at::Tensor & log2_(at::Tensor & self) {
return wrapper_CPU_log2_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_logaddexp2_out_functional final : public at::native::structured_logaddexp2_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_logaddexp2(const at::Tensor & self, const at::Tensor & other) {
structured_logaddexp2_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_logaddexp2_out_out final : public at::native::structured_logaddexp2_out {
    structured_logaddexp2_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_logaddexp2_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_logaddexp2_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("logaddexp2", TORCH_FN(wrapper_CPU_logaddexp2));
m.impl("logaddexp2.out", TORCH_FN(wrapper_CPU_logaddexp2_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_logaddexp2(self, other);
}
at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_logaddexp2_out_out(self, other, out);
}
at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_logaddexp2_out_out(self, other, out);
}
} // namespace cpu
} // namespace at
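// Illustrative only: logaddexp2(a, b) computes log2(2^a + 2^b) without
// overflowing the intermediate powers. A minimal sketch, assuming a standard
// ATen build:
//
//   at::Tensor a = at::tensor({1.0f});
//   at::Tensor b = at::tensor({2.0f});
//   at::cpu::logaddexp2(a, b);   // log2(2 + 4) = log2(6) ~= 2.585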
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_xlogy_out_functional final : public at::native::structured_xlogy_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_xlogy_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_xlogy_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_xlogy_out_out final : public at::native::structured_xlogy_out {
    structured_xlogy_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_xlogy_out_OutTensor(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_xlogy_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_xlogy_out_inplace final : public at::native::structured_xlogy_out {
    structured_xlogy_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_xlogy__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_xlogy_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("xlogy.Tensor", TORCH_FN(wrapper_CPU_xlogy_Tensor));
m.impl("xlogy.OutTensor", TORCH_FN(wrapper_CPU_xlogy_out_OutTensor));
m.impl("xlogy_.Tensor", TORCH_FN(wrapper_CPU_xlogy__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_xlogy_Tensor(self, other);
}
at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_xlogy_out_OutTensor(self, other, out);
}
at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_xlogy_out_OutTensor(self, other, out);
}
at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_xlogy__Tensor(self, other);
}
} // namespace cpu
} // namespace at
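// Illustrative only: xlogy(x, y) is x * log(y) with the convention that the
// result is 0 wherever x == 0, which sidesteps 0 * log(0). A minimal sketch,
// assuming a standard ATen build:
//
//   at::Tensor x = at::tensor({0.0f, 2.0f});
//   at::Tensor y = at::tensor({0.0f, 3.0f});
//   at::cpu::xlogy(x, y);   // {0, 2 * ln(3)} ~= {0, 2.197}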
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_logspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logspace_out(start, end, steps, base, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("logspace.out",
TORCH_FN(wrapper_CPU_out_logspace_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base) {
return wrapper_CPU_out_logspace_out(start, end, steps, base, out);
}
at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
return wrapper_CPU_out_logspace_out(start, end, steps, base, out);
}
} // namespace cpu
} // namespace at
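// Illustrative only: logspace.out fills `out` with `steps` values of base^v,
// for v evenly spaced from start to end. A minimal sketch, assuming a
// standard ATen build:
//
//   at::Tensor out = at::empty({5});
//   at::cpu::logspace_out(out, /*start=*/0, /*end=*/4, /*steps=*/5, /*base=*/2.0);
//   // out == {1, 2, 4, 8, 16}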
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___aminmax(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_aminmax_all(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_aminmax",
TORCH_FN(wrapper_CPU___aminmax));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self) {
return wrapper_CPU___aminmax(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_dim__aminmax(const at::Tensor & self, int64_t dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::_aminmax(self, dim, keepdim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_aminmax.dim",
TORCH_FN(wrapper_CPU_dim__aminmax));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_dim__aminmax(self, dim, keepdim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_aminmax_out_functional final : public at::native::structured_aminmax_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: structured_aminmax does not inherit from
        // TensorIteratorBase, so there is no parent set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: structured_aminmax does not inherit from
        // TensorIteratorBase, so there is no parent set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_aminmax(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
structured_aminmax_out_functional op;
op.meta(self, dim, keepdim);
op.impl(self, dim, keepdim, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
struct structured_aminmax_out_out final : public at::native::structured_aminmax_out {
    structured_aminmax_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_aminmax_out_out(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
structured_aminmax_out_out op(min, max);
op.meta(self, dim, keepdim);
op.impl(self, dim, keepdim, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(min, max);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("aminmax", TORCH_FN(wrapper_CPU_aminmax));
m.impl("aminmax.out", TORCH_FN(wrapper_CPU_aminmax_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
return wrapper_CPU_aminmax(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
return wrapper_CPU_aminmax_out_out(self, dim, keepdim, min, max);
}
::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
return wrapper_CPU_aminmax_out_out(self, dim, keepdim, min, max);
}
} // namespace cpu
} // namespace at
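// NOTE: aminmax is a structured kernel: the functional and out= entry points
// above share one meta()/impl() pair and differ only in how outputs are
// materialized. The functional functor allocates fresh outputs with
// create_out(); the out= functor resizes the caller's tensors and, when an
// output cannot be given the strides the kernel wants, computes into a
// temporary proxy that is copy_()'d back afterwards (see the two
// proxy_outputs_ checks above). Illustrative out= call, assuming resizable
// output tensors:
//
//   at::Tensor t = at::randn({4, 5});
//   at::Tensor mn = at::empty({0}), mx = at::empty({0});
//   at::cpu::aminmax_outf(t, /*dim=*/1, /*keepdim=*/false, mn, mx);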
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___compute_linear_combination(const at::Tensor & input, const at::Tensor & coefficients) {
  // No device check
  // DeviceGuard omitted
  return at::native::_compute_linear_combination(input, coefficients);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out__compute_linear_combination_out(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_compute_linear_combination_out(input, coefficients, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_compute_linear_combination",
TORCH_FN(wrapper_CPU___compute_linear_combination));
m.impl("_compute_linear_combination.out",
TORCH_FN(wrapper_CPU_out__compute_linear_combination_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _compute_linear_combination(const at::Tensor & input, const at::Tensor & coefficients) {
return wrapper_CPU___compute_linear_combination(input, coefficients);
}
at::Tensor & _compute_linear_combination_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & coefficients) {
return wrapper_CPU_out__compute_linear_combination_out(input, coefficients, out);
}
at::Tensor & _compute_linear_combination_outf(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
return wrapper_CPU_out__compute_linear_combination_out(input, coefficients, out);
}
} // namespace cpu
} // namespace at
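// NOTE: For each out= operator the at::cpu:: surface carries two spellings:
// *_out takes the output tensor(s) first (the legacy C++ argument order),
// while *_outf keeps schema order with outputs last. Both forward to the
// same wrapper, so the two calls below are equivalent (illustrative; input
// and coefficients are assumed tensors):
//
//   at::Tensor out = at::empty({0});
//   at::cpu::_compute_linear_combination_out(out, input, coefficients);
//   at::cpu::_compute_linear_combination_outf(input, coefficients, out);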
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_min_out_functional final : public at::native::structured_min_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_min_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
structured_min_out_functional op;
auto precompute = op.meta(self, dim, keepdim);
op.impl(self, precompute.dim, keepdim, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
struct structured_min_out_out final : public at::native::structured_min_out {
    structured_min_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_min_out_dim_min(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
structured_min_out_out op(min, min_indices);
auto precompute = op.meta(self, dim, keepdim);
op.impl(self, precompute.dim, keepdim, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(min, min_indices);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("min.dim", TORCH_FN(wrapper_CPU_min_dim));
m.impl("min.dim_min", TORCH_FN(wrapper_CPU_min_out_dim_min));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_min_dim(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_min_out_dim_min(self, dim, keepdim, min, min_indices);
}
::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
return wrapper_CPU_min_out_dim_min(self, dim, keepdim, min, min_indices);
}
} // namespace cpu
} // namespace at
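// NOTE: min.dim shows a structured kernel with precomputed arguments:
// op.meta() returns a precompute struct (here carrying the wrapped,
// non-negative dim), and op.impl() consumes precompute.dim rather than the
// raw user argument. Illustrative call with a negative dim:
//
//   at::Tensor t = at::randn({4, 5});
//   auto [vals, idxs] = at::cpu::min(t, /*dim=*/-1, /*keepdim=*/false);
//   // meta() wraps dim == -1 to 1 before impl() runs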
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___convert_weight_to_int4pack_for_cpu(const at::Tensor & self, int64_t innerKTiles) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convert_weight_to_int4pack_cpu(self, innerKTiles);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_convert_weight_to_int4pack_for_cpu",
TORCH_FN(wrapper_CPU___convert_weight_to_int4pack_for_cpu));
}
} // anonymous namespace
namespace cpu {
at::Tensor _convert_weight_to_int4pack_for_cpu(const at::Tensor & self, int64_t innerKTiles) {
return wrapper_CPU___convert_weight_to_int4pack_for_cpu(self, innerKTiles);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_out__native_batch_norm_legit_out(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_legit_cpu_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___native_batch_norm_legit(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_legit_cpu(input, weight, bias, running_mean, running_var, training, momentum, eps);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_native_batch_norm_legit.out",
TORCH_FN(wrapper_CPU_out__native_batch_norm_legit_out));
m.impl("_native_batch_norm_legit",
TORCH_FN(wrapper_CPU___native_batch_norm_legit));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
return wrapper_CPU_out__native_batch_norm_legit_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
return wrapper_CPU_out__native_batch_norm_legit_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
return wrapper_CPU___native_batch_norm_legit(input, weight, bias, running_mean, running_var, training, momentum, eps);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_no_stats__native_batch_norm_legit(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_legit_no_stats_cpu(input, weight, bias, training, momentum, eps);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_no_stats_out__native_batch_norm_legit_out(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_legit_no_stats_cpu_out(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_native_batch_norm_legit.no_stats",
TORCH_FN(wrapper_CPU_no_stats__native_batch_norm_legit));
m.impl("_native_batch_norm_legit.no_stats_out",
TORCH_FN(wrapper_CPU_no_stats_out__native_batch_norm_legit_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
return wrapper_CPU_no_stats__native_batch_norm_legit(input, weight, bias, training, momentum, eps);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
return wrapper_CPU_no_stats_out__native_batch_norm_legit_out(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
return wrapper_CPU_no_stats_out__native_batch_norm_legit_out(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
}
} // namespace cpu
} // namespace at
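// NOTE: The two _native_batch_norm_legit blocks above map the schema
// overloads (with and without running stats) onto a single at::cpu::
// overload set distinguished purely by parameter lists; the stats-carrying
// variants also update running_mean/running_var in place when training.
// Illustrative (all tensors assumed suitably shaped):
//
//   auto [y, save_mean, save_invstd] = at::cpu::_native_batch_norm_legit(
//       input, weight, bias, running_mean, running_var,
//       /*training=*/true, /*momentum=*/0.1, /*eps=*/1e-5);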
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___cdist_forward(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cdist_forward(x1, x2, p, compute_mode);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_cdist_forward",
TORCH_FN(wrapper_CPU___cdist_forward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _cdist_forward(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
return wrapper_CPU___cdist_forward(x1, x2, p, compute_mode);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cdist_backward(grad, x1, x2, p, cdist);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_cdist_backward",
TORCH_FN(wrapper_CPU___cdist_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
return wrapper_CPU___cdist_backward(grad, x1, x2, p, cdist);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::pixel_unshuffle_cpu(self, downscale_factor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("pixel_unshuffle",
TORCH_FN(wrapper_CPU__pixel_unshuffle));
}
} // anonymous namespace
namespace cpu {
at::Tensor pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) {
return wrapper_CPU__pixel_unshuffle(self, downscale_factor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__native_channel_shuffle(const at::Tensor & self, c10::SymInt groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::channel_shuffle_cpu(self, groups.guard_int(__FILE__, __LINE__));
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("native_channel_shuffle",
TORCH_FN(wrapper_CPU__native_channel_shuffle));
}
} // anonymous namespace
namespace cpu {
at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups) {
return wrapper_CPU__native_channel_shuffle(self, groups);
}
at::Tensor native_channel_shuffle_symint(const at::Tensor & self, c10::SymInt groups) {
return wrapper_CPU__native_channel_shuffle(self, groups);
}
} // namespace cpu
} // namespace at
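// NOTE: native_channel_shuffle is declared with a SymInt argument, so the
// wrapper receives a possibly-symbolic groups value and concretizes it with
// guard_int(__FILE__, __LINE__) before calling the CPU kernel (installing a
// guard when tracing with symbolic shapes). The at::cpu:: surface exposes
// both a plain int64_t entry point and a *_symint one:
//
//   at::Tensor x = at::randn({1, 6, 4, 4});
//   at::Tensor a = at::cpu::native_channel_shuffle(x, /*groups=*/3);
//   at::Tensor b = at::cpu::native_channel_shuffle_symint(x, c10::SymInt(3));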
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__relu(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__relu_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("relu",
TORCH_FN(wrapper_CPU__relu));
m.impl("relu_",
TORCH_FN(wrapper_CPU__relu_));
}
} // anonymous namespace
namespace cpu {
at::Tensor relu(const at::Tensor & self) {
return wrapper_CPU__relu(self);
}
at::Tensor & relu_(at::Tensor & self) {
return wrapper_CPU__relu_(self);
}
} // namespace cpu
} // namespace at
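// NOTE: relu/relu_ illustrate the functional/in-place pairing convention:
// the trailing underscore marks the variant that mutates and returns self.
//
//   at::Tensor t = at::randn({3});
//   at::Tensor r = at::cpu::relu(t);  // fresh result tensor
//   at::cpu::relu_(t);                // t modified in place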
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__logit(const at::Tensor & self, ::std::optional<double> eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::logit(self, eps);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_logit_out(const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logit_out(self, eps, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__logit_(at::Tensor & self, ::std::optional<double> eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::logit_(self, eps);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("logit",
TORCH_FN(wrapper_CPU__logit));
m.impl("logit.out",
TORCH_FN(wrapper_CPU_out_logit_out));
m.impl("logit_",
TORCH_FN(wrapper_CPU__logit_));
}
} // anonymous namespace
namespace cpu {
at::Tensor logit(const at::Tensor & self, ::std::optional<double> eps) {
return wrapper_CPU__logit(self, eps);
}
at::Tensor & logit_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> eps) {
return wrapper_CPU_out_logit_out(self, eps, out);
}
at::Tensor & logit_outf(const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
return wrapper_CPU_out_logit_out(self, eps, out);
}
at::Tensor & logit_(at::Tensor & self, ::std::optional<double> eps) {
return wrapper_CPU__logit_(self, eps);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__nansum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::nansum(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_nansum_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nansum_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("nansum",
TORCH_FN(wrapper_CPU__nansum));
m.impl("nansum.out",
TORCH_FN(wrapper_CPU_out_nansum_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU__nansum(self, dim, keepdim, dtype);
}
at::Tensor & nansum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_out_nansum_out(self, dim, keepdim, dtype, out);
}
at::Tensor & nansum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CPU_out_nansum_out(self, dim, keepdim, dtype, out);
}
} // namespace cpu
} // namespace at
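// NOTE: nansum takes an at::OptionalIntArrayRef dim, so callers can reduce
// over all dimensions or over an explicit subset (illustrative):
//
//   at::Tensor t = at::randn({4, 5});
//   at::Tensor total = at::cpu::nansum(t, std::nullopt, false, std::nullopt);
//   at::Tensor dim0  = at::cpu::nansum(t, at::IntArrayRef{0}, false, std::nullopt);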
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_sqrt_out_functional final : public at::native::structured_sqrt_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_sqrt(const at::Tensor & self) {
structured_sqrt_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_sqrt_out_out final : public at::native::structured_sqrt_out {
    structured_sqrt_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sqrt_out_out(const at::Tensor & self, at::Tensor & out) {
structured_sqrt_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_sqrt_out_inplace final : public at::native::structured_sqrt_out {
    structured_sqrt_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sqrt_(at::Tensor & self) {
structured_sqrt_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("sqrt", TORCH_FN(wrapper_CPU_sqrt));
m.impl("sqrt.out", TORCH_FN(wrapper_CPU_sqrt_out_out));
m.impl("sqrt_", TORCH_FN(wrapper_CPU_sqrt_));
}
} // anonymous namespace
namespace cpu {
at::Tensor sqrt(const at::Tensor & self) {
return wrapper_CPU_sqrt(self);
}
at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_sqrt_out_out(self, out);
}
at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_sqrt_out_out(self, out);
}
at::Tensor & sqrt_(at::Tensor & self) {
return wrapper_CPU_sqrt_(self);
}
} // namespace cpu
} // namespace at
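// NOTE: sqrt adds the third structured-output flavor: the in-place functor
// validates self with check_inplace() (sizes and options must already match)
// instead of resizing it. Each set_output override here also ends by
// forwarding to at::native::structured_sqrt_out::set_output_raw_strided;
// for TensorIterator-backed kernels that base call is what registers the
// chosen output tensor with the iterator. Illustrative:
//
//   at::Tensor t = at::rand({3, 3});
//   at::cpu::sqrt_(t);  // in place; a proxy is used only when required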
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__prod(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::prod(self, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("prod",
TORCH_FN(wrapper_CPU__prod));
}
} // anonymous namespace
namespace cpu {
at::Tensor prod(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU__prod(self, dtype);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_prod_out_functional final : public at::native::structured_prod_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_prod_dim_int(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
structured_prod_out_functional op;
op.meta(self, dim, keepdim, dtype);
op.impl(self, dim, keepdim, dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_prod_out_out final : public at::native::structured_prod_out {
    structured_prod_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_prod_out_int_out(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
structured_prod_out_out op(out);
op.meta(self, dim, keepdim, dtype);
op.impl(self, dim, keepdim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("prod.dim_int", TORCH_FN(wrapper_CPU_prod_dim_int));
m.impl("prod.int_out", TORCH_FN(wrapper_CPU_prod_out_int_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_prod_dim_int(self, dim, keepdim, dtype);
}
at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_prod_out_int_out(self, dim, keepdim, dtype, out);
}
at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CPU_prod_out_int_out(self, dim, keepdim, dtype, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_threshold_backward_out_functional final : public at::native::structured_threshold_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
structured_threshold_backward_out_functional op;
op.meta(grad_output, self, threshold);
op.impl(grad_output, self, threshold, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_threshold_backward_out_out final : public at::native::structured_threshold_backward_out {
    structured_threshold_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_threshold_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
structured_threshold_backward_out_out op(grad_input);
op.meta(grad_output, self, threshold);
op.impl(grad_output, self, threshold, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("threshold_backward", TORCH_FN(wrapper_CPU_threshold_backward));
m.impl("threshold_backward.grad_input", TORCH_FN(wrapper_CPU_threshold_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_CPU_threshold_backward(grad_output, self, threshold);
}
at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_CPU_threshold_backward_out_grad_input(grad_output, self, threshold, grad_input);
}
at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_CPU_threshold_backward_out_grad_input(grad_output, self, threshold, grad_input);
}
} // namespace cpu
} // namespace at
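// NOTE: threshold_backward.grad_input is an out= overload whose output is
// named grad_input in the schema, which is why the wrapper and the at::cpu::
// out variants place grad_input where other ops place out (illustrative;
// grad_output and self are assumed tensors):
//
//   at::Tensor gi = at::empty({0});
//   at::cpu::threshold_backward_outf(grad_output, self, /*threshold=*/0, gi);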
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___transform_bias_rescale_qkv(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
  // No device check
  // DeviceGuard omitted
  return at::native::transform_bias_rescale_qkv_cpu(qkv, qkv_bias, num_heads);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_transform_bias_rescale_qkv",
TORCH_FN(wrapper_CPU___transform_bias_rescale_qkv));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
return wrapper_CPU___transform_bias_rescale_qkv(qkv, qkv_bias, num_heads);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___nested_from_padded(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
  // No device check
  // DeviceGuard omitted
  return at::native::nested_from_padded_generic(padded, cpu_nested_shape_example, fuse_transform_0213);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_nested_from_padded",
TORCH_FN(wrapper_CPU___nested_from_padded));
}
} // anonymous namespace
namespace cpu {
at::Tensor _nested_from_padded(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
return wrapper_CPU___nested_from_padded(padded, cpu_nested_shape_example, fuse_transform_0213);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_trunc_out_functional final : public at::native::structured_trunc_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_trunc(const at::Tensor & self) {
structured_trunc_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_trunc_out_out final : public at::native::structured_trunc_out {
    structured_trunc_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_trunc_out_out(const at::Tensor & self, at::Tensor & out) {
structured_trunc_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_trunc_out_inplace final : public at::native::structured_trunc_out {
    structured_trunc_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_trunc_(at::Tensor & self) {
structured_trunc_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("trunc", TORCH_FN(wrapper_CPU_trunc));
m.impl("trunc.out", TORCH_FN(wrapper_CPU_trunc_out_out));
m.impl("trunc_", TORCH_FN(wrapper_CPU_trunc_));
}
} // anonymous namespace
namespace cpu {
at::Tensor trunc(const at::Tensor & self) {
return wrapper_CPU_trunc(self);
}
at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_trunc_out_out(self, out);
}
at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_trunc_out_out(self, out);
}
at::Tensor & trunc_(at::Tensor & self) {
return wrapper_CPU_trunc_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__unique_dim_consecutive(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
  // No device check
  // DeviceGuard omitted
  return at::native::unique_dim_consecutive_cpu(self, dim, return_inverse, return_counts);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("unique_dim_consecutive",
TORCH_FN(wrapper_CPU__unique_dim_consecutive));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
return wrapper_CPU__unique_dim_consecutive(self, dim, return_inverse, return_counts);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_self_where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_self_out_where_out(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::where_self_out(condition, self, other, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("where.self",
TORCH_FN(wrapper_CPU_self_where));
m.impl("where.self_out",
TORCH_FN(wrapper_CPU_self_out_where_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_self_where(condition, self, other);
}
at::Tensor & where_out(at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_self_out_where_out(condition, self, other, out);
}
at::Tensor & where_outf(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_self_out_where_out(condition, self, other, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___weight_norm_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::weight_norm_backward_cpu(grad_w, saved_v, saved_g, saved_norms, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_weight_norm_interface_backward",
TORCH_FN(wrapper_CPU___weight_norm_interface_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
return wrapper_CPU___weight_norm_interface_backward(grad_w, saved_v, saved_g, saved_norms, dim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___sample_dirichlet(const at::Tensor & self, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::_s_dirichlet_cpu(self, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_sample_dirichlet",
TORCH_FN(wrapper_CPU___sample_dirichlet));
}
} // anonymous namespace
namespace cpu {
at::Tensor _sample_dirichlet(const at::Tensor & self, ::std::optional<at::Generator> generator) {
return wrapper_CPU___sample_dirichlet(self, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__binomial(const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::_s_binomial_cpu(count, prob, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("binomial",
TORCH_FN(wrapper_CPU__binomial));
}
} // anonymous namespace
namespace cpu {
at::Tensor binomial(const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator) {
return wrapper_CPU__binomial(count, prob, generator);
}
} // namespace cpu
} // namespace at
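// NOTE: The sampling ops above (_sample_dirichlet, binomial) thread an
// optional at::Generator through to the CPU RNG. Passing std::nullopt uses
// the default CPU generator; a seeded one can be supplied explicitly
// (illustrative; count and prob are assumed tensors of matching shape):
//
//   at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
//   at::Tensor draws = at::cpu::binomial(count, prob, gen);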
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve) {
  // No device check
  // DeviceGuard omitted
  return at::native::_new_batch_norm_backward_cpu(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("batch_norm_backward",
TORCH_FN(wrapper_CPU__batch_norm_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve) {
return wrapper_CPU__batch_norm_backward(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve);
}
} // namespace cpu
} // namespace at
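// NOTE: batch_norm_backward takes a ::std::array<bool,3> output_mask that
// selects which of (grad_input, grad_weight, grad_bias) to compute;
// deselected entries typically come back as undefined tensors. Illustrative
// (all tensors assumed suitably shaped):
//
//   auto grads = at::cpu::batch_norm_backward(
//       grad_out, input, weight, running_mean, running_var,
//       save_mean, save_var, /*update=*/true, /*eps=*/1e-5,
//       /*output_mask=*/{true, true, false}, reserve);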
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__zero_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::zero_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("zero_",
TORCH_FN(wrapper_CPU__zero_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & zero_(at::Tensor & self) {
return wrapper_CPU__zero_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_heaviside_out_functional final : public at::native::structured_heaviside_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_heaviside(const at::Tensor & self, const at::Tensor & values) {
structured_heaviside_out_functional op;
op.meta(self, values);
op.impl(self, values, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_heaviside_out_out final : public at::native::structured_heaviside_out {
    structured_heaviside_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_heaviside_out_out(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
structured_heaviside_out_out op(out);
op.meta(self, values);
op.impl(self, values, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_heaviside_out_inplace final : public at::native::structured_heaviside_out {
    structured_heaviside_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_heaviside_(at::Tensor & self, const at::Tensor & values) {
structured_heaviside_out_inplace op(self);
op.meta(self, values);
op.impl(self, values, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("heaviside", TORCH_FN(wrapper_CPU_heaviside));
m.impl("heaviside.out", TORCH_FN(wrapper_CPU_heaviside_out_out));
m.impl("heaviside_", TORCH_FN(wrapper_CPU_heaviside_));
}
} // anonymous namespace
namespace cpu {
at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values) {
return wrapper_CPU_heaviside(self, values);
}
at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values) {
return wrapper_CPU_heaviside_out_out(self, values, out);
}
at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
return wrapper_CPU_heaviside_out_out(self, values, out);
}
at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values) {
return wrapper_CPU_heaviside_(self, values);
}
} // namespace cpu
} // namespace at
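// How the three structured variants above differ (illustrative summary, not
// generated code): the functional class allocates fresh outputs via
// create_out; the out= class resizes the caller's tensor and, when that tensor
// cannot be given the required striding, writes through a temporary proxy that
// is copied back afterwards; the inplace class only validates self with
// check_inplace. A sketch:
//
//   at::Tensor a = at::randn({3});
//   at::Tensor v = at::zeros({3});
//   at::Tensor r = at::cpu::heaviside(a, v);        // functional
//   at::Tensor out = at::empty({3});
//   at::cpu::heaviside_out(out, a, v);              // out= variant
//   at::cpu::heaviside_(a, v);                      // in-place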
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_addmm_out_cpu_functional final : public at::native::structured_addmm_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
structured_addmm_out_cpu_functional op;
op.meta(self, mat1, mat2, beta, alpha);
op.impl(self, mat1, mat2, beta, alpha, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_addmm_out_cpu_out final : public at::native::structured_addmm_out_cpu {
    structured_addmm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addmm_out_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
structured_addmm_out_cpu_out op(out);
op.meta(self, mat1, mat2, beta, alpha);
op.impl(self, mat1, mat2, beta, alpha, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_addmm_out_cpu_inplace final : public at::native::structured_addmm_out_cpu {
    structured_addmm_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
structured_addmm_out_cpu_inplace op(self);
op.meta(self, mat1, mat2, beta, alpha);
op.impl(self, mat1, mat2, beta, alpha, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("addmm", TORCH_FN(wrapper_CPU_addmm));
m.impl("addmm.out", TORCH_FN(wrapper_CPU_addmm_out_out));
m.impl("addmm_", TORCH_FN(wrapper_CPU_addmm_));
}
} // anonymous namespace
namespace cpu {
at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_addmm_out_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_addmm_out_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_addmm_(self, mat1, mat2, beta, alpha);
}
} // namespace cpu
} // namespace at
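// Illustrative call (not generated): addmm computes beta * self + alpha *
// (mat1 @ mat2), with self broadcastable to the {m, n} result shape:
//
//   at::Tensor bias = at::zeros({2, 4});
//   at::Tensor m1 = at::randn({2, 3});
//   at::Tensor m2 = at::randn({3, 4});
//   at::Tensor y = at::cpu::addmm(bias, m1, m2, /*beta=*/1, /*alpha=*/1);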
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse_bsc(self, blocksize, dense_dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_to_sparse_bsc",
TORCH_FN(wrapper_CPU___to_sparse_bsc));
}
} // anonymous namespace
namespace cpu {
at::Tensor _to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
return wrapper_CPU___to_sparse_bsc(self, blocksize, dense_dim);
}
} // namespace cpu
} // namespace at
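// Illustrative only: this internal op expects a blocksize that evenly divides
// the sparse dimensions; the usual public entry point is
// Tensor::to_sparse_bsc. A sketch:
//
//   at::Tensor dense = at::randn({4, 6});
//   at::Tensor bsc = at::cpu::_to_sparse_bsc(dense, /*blocksize=*/{2, 3},
//                                            /*dense_dim=*/::std::nullopt);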
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__to_mkldnn(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_mkldnn(self, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("to_mkldnn",
TORCH_FN(wrapper_CPU__to_mkldnn));
}
} // anonymous namespace
namespace cpu {
at::Tensor to_mkldnn(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU__to_mkldnn(self, dtype);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___make_per_tensor_quantized_tensor(const at::Tensor & self, double scale, int64_t zero_point) {
  // No device check
  // DeviceGuard omitted
  return at::native::make_per_tensor_quantized_tensor_cpu(self, scale, zero_point);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_make_per_tensor_quantized_tensor",
TORCH_FN(wrapper_CPU___make_per_tensor_quantized_tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor _make_per_tensor_quantized_tensor(const at::Tensor & self, double scale, int64_t zero_point) {
return wrapper_CPU___make_per_tensor_quantized_tensor(self, scale, zero_point);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_Scalar_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_fill__cpu(self, mask, value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("masked_fill_.Scalar",
TORCH_FN(wrapper_CPU_Scalar_masked_fill_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
return wrapper_CPU_Scalar_masked_fill_(self, mask, value);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_Tensor_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_fill__cpu(self, mask, value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("masked_fill_.Tensor",
TORCH_FN(wrapper_CPU_Tensor_masked_fill_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
return wrapper_CPU_Tensor_masked_fill_(self, mask, value);
}
} // namespace cpu
} // namespace at
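// The two masked_fill_ registrations above are schema overloads of the same
// in-place op; in the cpu namespace, ordinary C++ overload resolution picks
// between them based on the value argument. A sketch (illustrative only; the
// Tensor overload requires a 0-dim value):
//
//   at::Tensor t = at::zeros({4});
//   at::Tensor mask = at::randn({4}).gt(0);          // bool mask
//   at::cpu::masked_fill_(t, mask, 1.0);             // Scalar overload
//   at::cpu::masked_fill_(t, mask, at::ones({}));    // Tensor overload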
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___masked_softmax(const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_softmax_cpu(self, mask, dim, mask_type);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_masked_softmax",
TORCH_FN(wrapper_CPU___masked_softmax));
}
} // anonymous namespace
namespace cpu {
at::Tensor _masked_softmax(const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type) {
return wrapper_CPU___masked_softmax(self, mask, dim, mask_type);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_bitwise_right_shift_out_functional final : public at::native::structured_bitwise_right_shift_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_bitwise_right_shift_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_bitwise_right_shift_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_bitwise_right_shift_out_out final : public at::native::structured_bitwise_right_shift_out {
    structured_bitwise_right_shift_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_bitwise_right_shift_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_bitwise_right_shift_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_bitwise_right_shift_out_inplace final : public at::native::structured_bitwise_right_shift_out {
    structured_bitwise_right_shift_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_bitwise_right_shift__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_bitwise_right_shift_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bitwise_right_shift.Tensor", TORCH_FN(wrapper_CPU_bitwise_right_shift_Tensor));
m.impl("bitwise_right_shift.Tensor_out", TORCH_FN(wrapper_CPU_bitwise_right_shift_out_Tensor_out));
m.impl("bitwise_right_shift_.Tensor", TORCH_FN(wrapper_CPU_bitwise_right_shift__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_bitwise_right_shift_Tensor(self, other);
}
at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_bitwise_right_shift_out_Tensor_out(self, other, out);
}
at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_bitwise_right_shift_out_Tensor_out(self, other, out);
}
at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_bitwise_right_shift__Tensor(self, other);
}
} // namespace cpu
} // namespace at
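// Illustrative only: the shift kernels are defined for integral dtypes, and
// the structured meta promotes both operands to a common type before the
// kernel runs:
//
//   at::Tensor x = at::arange(8, at::kInt);
//   at::Tensor s = at::cpu::bitwise_right_shift(x, at::ones_like(x));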
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__cauchy_(at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::cauchy_(self, median, sigma, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("cauchy_",
TORCH_FN(wrapper_CPU__cauchy_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & cauchy_(at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
return wrapper_CPU__cauchy_(self, median, sigma, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__log_normal_(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_normal_(self, mean, std, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("log_normal_",
TORCH_FN(wrapper_CPU__log_normal_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & log_normal_(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
return wrapper_CPU__log_normal_(self, mean, std, generator);
}
} // namespace cpu
} // namespace at
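// Illustrative only: the RNG in-place ops above (cauchy_, log_normal_) take
// an optional generator; nullopt means the default CPU generator, and an
// explicit at::Generator can be supplied for a reproducible stream:
//
//   at::Tensor t = at::empty({5});
//   at::cpu::log_normal_(t, /*mean=*/0.0, /*std=*/1.0, ::std::nullopt);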
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_ne_Scalar_out_functional final : public at::native::structured_ne_Scalar_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_ne_Scalar(const at::Tensor & self, const at::Scalar & other) {
structured_ne_Scalar_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_ne_Scalar_out_out final : public at::native::structured_ne_Scalar_out {
    structured_ne_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_ne_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
structured_ne_Scalar_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_ne_Scalar_out_inplace final : public at::native::structured_ne_Scalar_out {
    structured_ne_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_ne__Scalar(at::Tensor & self, const at::Scalar & other) {
structured_ne_Scalar_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("ne.Scalar", TORCH_FN(wrapper_CPU_ne_Scalar));
m.impl("ne.Scalar_out", TORCH_FN(wrapper_CPU_ne_out_Scalar_out));
m.impl("ne_.Scalar", TORCH_FN(wrapper_CPU_ne__Scalar));
}
} // anonymous namespace
namespace cpu {
at::Tensor ne(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ne_Scalar(self, other);
}
at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ne_out_Scalar_out(self, other, out);
}
at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CPU_ne_out_Scalar_out(self, other, out);
}
at::Tensor & ne_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ne__Scalar(self, other);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_ne_Tensor_out_functional final : public at::native::structured_ne_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_ne_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_ne_Tensor_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_ne_Tensor_out_out final : public at::native::structured_ne_Tensor_out {
    structured_ne_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_ne_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_ne_Tensor_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_ne_Tensor_out_inplace final : public at::native::structured_ne_Tensor_out {
    structured_ne_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_ne__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_ne_Tensor_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("ne.Tensor", TORCH_FN(wrapper_CPU_ne_Tensor));
m.impl("ne.Tensor_out", TORCH_FN(wrapper_CPU_ne_out_Tensor_out));
m.impl("ne_.Tensor", TORCH_FN(wrapper_CPU_ne__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor ne(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ne_Tensor(self, other);
}
at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ne_out_Tensor_out(self, other, out);
}
at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_ne_out_Tensor_out(self, other, out);
}
at::Tensor & ne_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ne__Tensor(self, other);
}
} // namespace cpu
} // namespace at
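// Illustrative only: the ne.Tensor and ne.Scalar overloads above compare
// element-wise and produce a kBool tensor; the out= variants resize (and, if
// needed, proxy) the caller's tensor. Note the two out-wrapper spellings:
// ne_out takes out first, ne_outf takes it last.
//
//   at::Tensor a = at::randn({4});
//   at::Tensor b = at::randn({4});
//   at::Tensor m = at::cpu::ne(a, b);                              // bool tensor
//   at::Tensor out = at::empty({0}, a.options().dtype(at::kBool));
//   at::cpu::ne_outf(a, b, out);                                   // out resized in place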
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_addcmul_out_functional final : public at::native::structured_addcmul_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
structured_addcmul_out_functional op;
op.meta(self, tensor1, tensor2, value);
op.impl(self, tensor1, tensor2, value, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_addcmul_out_out final : public at::native::structured_addcmul_out {
    structured_addcmul_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addcmul_out_out(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
structured_addcmul_out_out op(out);
op.meta(self, tensor1, tensor2, value);
op.impl(self, tensor1, tensor2, value, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_addcmul_out_inplace final : public at::native::structured_addcmul_out {
    structured_addcmul_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
structured_addcmul_out_inplace op(self);
op.meta(self, tensor1, tensor2, value);
op.impl(self, tensor1, tensor2, value, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("addcmul", TORCH_FN(wrapper_CPU_addcmul));
m.impl("addcmul.out", TORCH_FN(wrapper_CPU_addcmul_out_out));
m.impl("addcmul_", TORCH_FN(wrapper_CPU_addcmul_));
}
} // anonymous namespace
namespace cpu {
at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcmul(self, tensor1, tensor2, value);
}
at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcmul_out_out(self, tensor1, tensor2, value, out);
}
at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
return wrapper_CPU_addcmul_out_out(self, tensor1, tensor2, value, out);
}
at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcmul_(self, tensor1, tensor2, value);
}
} // namespace cpu
} // namespace at
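// Illustrative only: addcmul fuses an element-wise multiply-add,
// self + value * tensor1 * tensor2, with the usual broadcasting:
//
//   at::Tensor r = at::cpu::addcmul(at::zeros({3}), at::randn({3}),
//                                   at::randn({3}), /*value=*/0.5);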
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
  // No device check
  // DeviceGuard omitted
  return at::native::ormqr(self, input2, input3, left, transpose);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_ormqr_out(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ormqr_out(self, input2, input3, left, transpose, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("ormqr",
TORCH_FN(wrapper_CPU__ormqr));
m.impl("ormqr.out",
TORCH_FN(wrapper_CPU_out_ormqr_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
return wrapper_CPU__ormqr(self, input2, input3, left, transpose);
}
at::Tensor & ormqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
return wrapper_CPU_out_ormqr_out(self, input2, input3, left, transpose, out);
}
at::Tensor & ormqr_outf(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
return wrapper_CPU_out_ormqr_out(self, input2, input3, left, transpose, out);
}
} // namespace cpu
} // namespace at
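// Illustrative only: the two out-wrapper spellings differ solely in argument
// order (out-first vs. out-last); both forward to the same wrapper above.
// Sketch, assuming (a, tau) come from a prior at::geqrf(input):
//
//   at::cpu::ormqr_out(out, a, tau, other, /*left=*/true, /*transpose=*/false);
//   at::cpu::ormqr_outf(a, tau, other, true, false, out);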
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_lu_unpack_out_functional final : public at::native::structured_lu_unpack_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 3> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
structured_lu_unpack_out_functional op;
op.meta(LU_data, LU_pivots, unpack_data, unpack_pivots);
op.impl(LU_data, LU_pivots, unpack_data, unpack_pivots, op.outputs_[0], op.outputs_[1], op.outputs_[2]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]));
}
struct structured_lu_unpack_out_out final : public at::native::structured_lu_unpack_out {
    structured_lu_unpack_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this op's meta class is not TensorIterator-backed,
        // so the base set_output_raw_strided has nothing further to do
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 3> outputs_;
    std::array<::std::optional<Tensor>, 3> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_lu_unpack_out_out(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
structured_lu_unpack_out_out op(P, L, U);
op.meta(LU_data, LU_pivots, unpack_data, unpack_pivots);
op.impl(LU_data, LU_pivots, unpack_data, unpack_pivots, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
return std::forward_as_tuple(P, L, U);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("lu_unpack", TORCH_FN(wrapper_CPU_lu_unpack));
m.impl("lu_unpack.out", TORCH_FN(wrapper_CPU_lu_unpack_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
return wrapper_CPU_lu_unpack(LU_data, LU_pivots, unpack_data, unpack_pivots);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
return wrapper_CPU_lu_unpack_out_out(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
return wrapper_CPU_lu_unpack_out_out(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}
} // namespace cpu
} // namespace at
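// Illustrative only: lu_unpack is a three-output structured op, so the out=
// wrapper above carries one proxy slot per output and copies each back as
// needed. Typical use starts from a factorization (sketch, not generated
// code):
//
//   at::Tensor A = at::randn({3, 3});
//   auto [LU, pivots] = at::linalg_lu_factor(A);
//   auto [P, L, U] = at::cpu::lu_unpack(LU, pivots, /*unpack_data=*/true,
//                                       /*unpack_pivots=*/true);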
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
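// Unstructured wrappers like the ones below forward straight to the
// handwritten kernel in at::native. The "No device check" / "DeviceGuard
// omitted" markers record that codegen skipped the device-index validation
// and the OptionalDeviceGuard it would otherwise emit, per the op's
// device_check/device_guard settings in native_functions.yaml.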
at::Tensor wrapper_CPU__histc(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
    // No device check
  // DeviceGuard omitted
  return at::native::histogram_histc(self, bins, min, max);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_histc_out(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::histogram_histc_out(self, bins, min, max, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("histc",
TORCH_FN(wrapper_CPU__histc));
m.impl("histc.out",
TORCH_FN(wrapper_CPU_out_histc_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor histc(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
return wrapper_CPU__histc(self, bins, min, max);
}
at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
return wrapper_CPU_out_histc_out(self, bins, min, max, out);
}
at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
return wrapper_CPU_out_histc_out(self, bins, min, max, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___histogramdd_from_bin_cts(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // No device check
  // DeviceGuard omitted
  return at::native::_histogramdd(self, bins, range, weight, density);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_histogramdd_from_bin_cts",
TORCH_FN(wrapper_CPU___histogramdd_from_bin_cts));
}
} // anonymous namespace
namespace cpu {
at::Tensor _histogramdd_from_bin_cts(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU___histogramdd_from_bin_cts(self, bins, range, weight, density);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
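// NOTE: A structured kernel such as hypot gets up to three generated
// wrapper classes: *_functional allocates fresh outputs with create_out,
// *_out writes into caller-provided tensors after resize_out, and
// *_inplace reuses `self` after check_inplace. The trailing
// at::native::structured_hypot_out::set_output_raw_strided(...) call
// forwards to the kernel's base class; it appears for kernels whose meta
// base (e.g. TensorIteratorBase) needs to observe the chosen outputs,
// while simpler structured ops such as lu_unpack above omit it (the
// "super must happen after" comment is emitted either way).
// Usage sketch, assuming CPU tensors a and b of matching shape:
//   at::Tensor out = at::empty({0}, a.options());
//   at::cpu::hypot_outf(a, b, out);  // resize_out grows `out` as needed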
struct structured_hypot_out_functional final : public at::native::structured_hypot_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_hypot(const at::Tensor & self, const at::Tensor & other) {
structured_hypot_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_hypot_out_out final : public at::native::structured_hypot_out {
    structured_hypot_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_hypot_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_hypot_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
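// In-place variant: the output aliases `self`, so check_inplace verifies
// that `self` already has the sizes/options the meta function computed
// rather than resizing it (an in-place op may not reallocate its operand).
// The proxy path still applies when the kernel prefers different strides,
// with the result copied back into `self` at the end.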
struct structured_hypot_out_inplace final : public at::native::structured_hypot_out {
    structured_hypot_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_hypot_(at::Tensor & self, const at::Tensor & other) {
structured_hypot_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("hypot", TORCH_FN(wrapper_CPU_hypot));
m.impl("hypot.out", TORCH_FN(wrapper_CPU_hypot_out_out));
m.impl("hypot_", TORCH_FN(wrapper_CPU_hypot_));
}
} // anonymous namespace
namespace cpu {
at::Tensor hypot(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_hypot(self, other);
}
at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_hypot_out_out(self, other, out);
}
at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_hypot_out_out(self, other, out);
}
at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_hypot_(self, other);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__min(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::min(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_unary_out_min_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::min_unary_out(self, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("min",
TORCH_FN(wrapper_CPU__min));
m.impl("min.unary_out",
TORCH_FN(wrapper_CPU_unary_out_min_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor min(const at::Tensor & self) {
return wrapper_CPU__min(self);
}
at::Tensor & min_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_unary_out_min_out(self, out);
}
at::Tensor & min_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_unary_out_min_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    // No device check
  // DeviceGuard omitted
  return at::native::unfold(self, dimension, size, step);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("unfold",
TORCH_FN(wrapper_CPU__unfold));
}
} // anonymous namespace
namespace cpu {
at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
return wrapper_CPU__unfold(self, dimension, size, step);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor_bucketize(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
    // No device check
  // DeviceGuard omitted
  return at::native::bucketize_cpu(self, boundaries, out_int32, right);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Tensor_out_bucketize_out(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::bucketize_out_cpu(self, boundaries, out_int32, right, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bucketize.Tensor",
TORCH_FN(wrapper_CPU_Tensor_bucketize));
m.impl("bucketize.Tensor_out",
TORCH_FN(wrapper_CPU_Tensor_out_bucketize_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor bucketize(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
return wrapper_CPU_Tensor_bucketize(self, boundaries, out_int32, right);
}
at::Tensor & bucketize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
return wrapper_CPU_Tensor_out_bucketize_out(self, boundaries, out_int32, right, out);
}
at::Tensor & bucketize_outf(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
return wrapper_CPU_Tensor_out_bucketize_out(self, boundaries, out_int32, right, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Scalar_bucketize(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
    // No device check
  // DeviceGuard omitted
  return at::native::bucketize_cpu(self, boundaries, out_int32, right);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bucketize.Scalar",
TORCH_FN(wrapper_CPU_Scalar_bucketize));
}
} // anonymous namespace
namespace cpu {
at::Tensor bucketize(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
return wrapper_CPU_Scalar_bucketize(self, boundaries, out_int32, right);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_mse_loss_out_functional final : public at::native::structured_mse_loss_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
structured_mse_loss_out_functional op;
op.meta(self, target, reduction);
op.impl(self, target, reduction, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_mse_loss_out_out final : public at::native::structured_mse_loss_out {
    structured_mse_loss_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_mse_loss_out_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
structured_mse_loss_out_out op(out);
op.meta(self, target, reduction);
op.impl(self, target, reduction, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mse_loss", TORCH_FN(wrapper_CPU_mse_loss));
m.impl("mse_loss.out", TORCH_FN(wrapper_CPU_mse_loss_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_mse_loss(self, target, reduction);
}
at::Tensor & mse_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_mse_loss_out_out(self, target, reduction, out);
}
at::Tensor & mse_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
return wrapper_CPU_mse_loss_out_out(self, target, reduction, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // No device check
  // DeviceGuard omitted
  return at::native::multi_margin_loss_cpu(self, target, p, margin, weight, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_multi_margin_loss_out(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::multi_margin_loss_cpu_out(self, target, p, margin, weight, reduction, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("multi_margin_loss",
TORCH_FN(wrapper_CPU__multi_margin_loss));
m.impl("multi_margin_loss.out",
TORCH_FN(wrapper_CPU_out_multi_margin_loss_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU__multi_margin_loss(self, target, p, margin, weight, reduction);
}
at::Tensor & multi_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU_out_multi_margin_loss_out(self, target, p, margin, weight, reduction, out);
}
at::Tensor & multi_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
return wrapper_CPU_out_multi_margin_loss_out(self, target, p, margin, weight, reduction, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // No device check
  // DeviceGuard omitted
  return at::native::multilabel_margin_loss_forward_cpu(self, target, reduction);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_output_multilabel_margin_loss_forward_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
    // No device check
  // DeviceGuard omitted
  return at::native::multilabel_margin_loss_forward_out_cpu(self, target, reduction, output, is_target);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("multilabel_margin_loss_forward",
TORCH_FN(wrapper_CPU__multilabel_margin_loss_forward));
m.impl("multilabel_margin_loss_forward.output",
TORCH_FN(wrapper_CPU_output_multilabel_margin_loss_forward_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU__multilabel_margin_loss_forward(self, target, reduction);
}
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_out(at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_output_multilabel_margin_loss_forward_out(self, target, reduction, output, is_target);
}
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
return wrapper_CPU_output_multilabel_margin_loss_forward_out(self, target, reduction, output, is_target);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
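// `ignore_index` is a c10::SymInt so the schema stays traceable with
// symbolic shapes; guard_int(__FILE__, __LINE__) extracts a concrete
// int64_t for the eager CPU kernel, recording a guard at this source
// location if the value is symbolic.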
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    // No device check
  // DeviceGuard omitted
  return at::native::nll_loss2d_forward_cpu(self, target, weight, reduction, ignore_index.guard_int(__FILE__, __LINE__));
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_output_nll_loss2d_forward_out(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
    // No device check
  // DeviceGuard omitted
  return at::native::nll_loss2d_forward_out_cpu(self, target, weight, reduction, ignore_index.guard_int(__FILE__, __LINE__), output, total_weight);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("nll_loss2d_forward",
TORCH_FN(wrapper_CPU__nll_loss2d_forward));
m.impl("nll_loss2d_forward.output",
TORCH_FN(wrapper_CPU_output_nll_loss2d_forward_out));
}
} // anonymous namespace
namespace cpu {
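// The int64_t and SymInt overloads below all funnel into the same wrapper;
// the plain-int versions rely on the implicit int64_t -> c10::SymInt
// conversion at the call site.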
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU__nll_loss2d_forward(self, target, weight, reduction, ignore_index);
}
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_symint(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU__nll_loss2d_forward(self, target, weight, reduction, ignore_index);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::glu_backward_cpu(grad_output, self, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_glu_backward_out(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::glu_backward_cpu_out(grad_output, self, dim, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("glu_backward",
TORCH_FN(wrapper_CPU__glu_backward));
m.impl("glu_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_glu_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
return wrapper_CPU__glu_backward(grad_output, self, dim);
}
at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
return wrapper_CPU_grad_input_glu_backward_out(grad_output, self, dim, grad_input);
}
at::Tensor & glu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_glu_backward_out(grad_output, self, dim, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::glu_backward_jvp(grad_x, grad_glu, x, dgrad_glu, dx, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("glu_backward_jvp",
TORCH_FN(wrapper_CPU__glu_backward_jvp));
}
} // anonymous namespace
namespace cpu {
at::Tensor glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
return wrapper_CPU__glu_backward_jvp(grad_x, grad_glu, x, dgrad_glu, dx, dim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardtanh(self, min_val, max_val);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_hardtanh_out(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardtanh_out(self, min_val, max_val, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardtanh_(self, min_val, max_val);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("hardtanh",
TORCH_FN(wrapper_CPU__hardtanh));
m.impl("hardtanh.out",
TORCH_FN(wrapper_CPU_out_hardtanh_out));
m.impl("hardtanh_",
TORCH_FN(wrapper_CPU__hardtanh_));
}
} // anonymous namespace
namespace cpu {
at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU__hardtanh(self, min_val, max_val);
}
at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU_out_hardtanh_out(self, min_val, max_val, out);
}
at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
return wrapper_CPU_out_hardtanh_out(self, min_val, max_val, out);
}
at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU__hardtanh_(self, min_val, max_val);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardtanh_backward(grad_output, self, min_val, max_val);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_hardtanh_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::hardtanh_backward_out(grad_output, self, min_val, max_val, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("hardtanh_backward",
TORCH_FN(wrapper_CPU__hardtanh_backward));
m.impl("hardtanh_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_hardtanh_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU__hardtanh_backward(grad_output, self, min_val, max_val);
}
at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU_grad_input_hardtanh_backward_out(grad_output, self, min_val, max_val, grad_input);
}
at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_hardtanh_backward_out(grad_output, self, min_val, max_val, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_leaky_relu_out_functional final : public at::native::structured_leaky_relu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
structured_leaky_relu_out_functional op;
op.meta(self, negative_slope);
op.impl(self, negative_slope, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_leaky_relu_out_out final : public at::native::structured_leaky_relu_out {
    structured_leaky_relu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_leaky_relu_out_out(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
structured_leaky_relu_out_out op(out);
op.meta(self, negative_slope);
op.impl(self, negative_slope, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_leaky_relu_out_inplace final : public at::native::structured_leaky_relu_out {
    structured_leaky_relu_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
structured_leaky_relu_out_inplace op(self);
op.meta(self, negative_slope);
op.impl(self, negative_slope, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("leaky_relu", TORCH_FN(wrapper_CPU_leaky_relu));
m.impl("leaky_relu.out", TORCH_FN(wrapper_CPU_leaky_relu_out_out));
m.impl("leaky_relu_", TORCH_FN(wrapper_CPU_leaky_relu_));
}
} // anonymous namespace
namespace cpu {
at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CPU_leaky_relu(self, negative_slope);
}
at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CPU_leaky_relu_out_out(self, negative_slope, out);
}
at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
return wrapper_CPU_leaky_relu_out_out(self, negative_slope, out);
}
at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CPU_leaky_relu_(self, negative_slope);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
    // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid_backward_cpu(grad_output, self, buffer);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_log_sigmoid_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid_backward_cpu_out(grad_output, self, buffer, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("log_sigmoid_backward",
TORCH_FN(wrapper_CPU__log_sigmoid_backward));
m.impl("log_sigmoid_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_log_sigmoid_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
return wrapper_CPU__log_sigmoid_backward(grad_output, self, buffer);
}
at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
return wrapper_CPU_grad_input_log_sigmoid_backward_out(grad_output, self, buffer, grad_input);
}
at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_log_sigmoid_backward_out(grad_output, self, buffer, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_softplus_out_functional final : public at::native::structured_softplus_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
structured_softplus_out_functional op;
op.meta(self, beta, threshold);
op.impl(self, beta, threshold, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_softplus_out_out final : public at::native::structured_softplus_out {
    structured_softplus_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_softplus_out_out(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
structured_softplus_out_out op(out);
op.meta(self, beta, threshold);
op.impl(self, beta, threshold, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("softplus", TORCH_FN(wrapper_CPU_softplus));
m.impl("softplus.out", TORCH_FN(wrapper_CPU_softplus_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus(self, beta, threshold);
}
at::Tensor & softplus_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus_out_out(self, beta, threshold, out);
}
at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
return wrapper_CPU_softplus_out_out(self, beta, threshold, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
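// The schema declares output_size as SymIntArrayRef;
// C10_AS_INTARRAYREF_SLOW guards any symbolic entries and materializes a
// plain IntArrayRef, which is what the eager CPU kernel expects.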
at::Tensor & wrapper_CPU_out_adaptive_avg_pool2d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool2d_out_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("adaptive_avg_pool2d.out",
TORCH_FN(wrapper_CPU_out_adaptive_avg_pool2d_out));
}
} // anonymous namespace
namespace cpu {
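// The non-symint entry points below wrap plain sizes into SymInts via
// c10::fromIntArrayRefSlow before calling the wrapper; the *_symint
// variants pass symbolic sizes through unchanged.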
at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, output_size, out);
}
at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, output_size, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool3d_backward_cpu(grad_output, self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_adaptive_avg_pool3d_backward",
TORCH_FN(wrapper_CPU___adaptive_avg_pool3d_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU___adaptive_avg_pool3d_backward(grad_output, self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_adaptive_max_pool2d_backward_out_cpu_functional final : public at::native::structured_adaptive_max_pool2d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
structured_adaptive_max_pool2d_backward_out_cpu_functional op;
op.meta(grad_output, self, indices);
op.impl(grad_output, self, indices, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_adaptive_max_pool2d_backward_out_cpu_out final : public at::native::structured_adaptive_max_pool2d_backward_out_cpu {
    structured_adaptive_max_pool2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
structured_adaptive_max_pool2d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, self, indices);
op.impl(grad_output, self, indices, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("adaptive_max_pool2d_backward", TORCH_FN(wrapper_CPU_adaptive_max_pool2d_backward));
m.impl("adaptive_max_pool2d_backward.grad_input", TORCH_FN(wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool2d_backward(grad_output, self, indices);
}
at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_fractional_max_pool2d_backward_cpu_functional final : public at::native::structured_fractional_max_pool2d_backward_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
structured_fractional_max_pool2d_backward_cpu_functional op;
op.meta(grad_output, self, kernel_size, output_size, indices);
op.impl(grad_output, self, kernel_size, output_size, indices, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_fractional_max_pool2d_backward_cpu_out final : public at::native::structured_fractional_max_pool2d_backward_cpu {
    structured_fractional_max_pool2d_backward_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_fractional_max_pool2d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
structured_fractional_max_pool2d_backward_cpu_out op(grad_input);
op.meta(grad_output, self, kernel_size, output_size, indices);
op.impl(grad_output, self, kernel_size, output_size, indices, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
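// NOTE: The out-variant above routes the kernel through a "proxy" tensor when
// the caller-supplied output cannot be given the layout the kernel requires:
// resize_out fixes the sizes, and if the strides still differ from the
// requested ones, maybe_create_proxy allocates a correctly-strided temporary.
// impl() writes into whatever maybe_get_output returns (the proxy when one
// exists), and the trailing copy_ transfers the data back into the caller's
// tensor. Illustrative trigger, with made-up shapes -- a non-contiguous
// output keeps its layout and simply receives a copy at the end:
//
//   at::Tensor grad_input = at::empty({8, 4}).t();  // transposed strides
//   // passing grad_input to a grad_input overload makes the kernel write a
//   // contiguous proxy first, then copy_ fills grad_input in its own layout.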
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fractional_max_pool2d_backward", TORCH_FN(wrapper_CPU_fractional_max_pool2d_backward));
m.impl("fractional_max_pool2d_backward.grad_input", TORCH_FN(wrapper_CPU_fractional_max_pool2d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU_fractional_max_pool2d_backward(grad_output, self, kernel_size, output_size, indices);
}
at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU_fractional_max_pool2d_backward_out_grad_input(grad_output, self, kernel_size, output_size, indices, grad_input);
}
at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_fractional_max_pool2d_backward_out_grad_input(grad_output, self, kernel_size, output_size, indices, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::fractional_max_pool3d_backward_cpu(grad_output, self, kernel_size, output_size, indices);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_fractional_max_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
  // No device check
  // DeviceGuard omitted
  return at::native::fractional_max_pool3d_backward_out_cpu(grad_output, self, kernel_size, output_size, indices, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fractional_max_pool3d_backward",
TORCH_FN(wrapper_CPU__fractional_max_pool3d_backward));
m.impl("fractional_max_pool3d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_fractional_max_pool3d_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU__fractional_max_pool3d_backward(grad_output, self, kernel_size, output_size, indices);
}
at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU_grad_input_fractional_max_pool3d_backward_out(grad_output, self, kernel_size, output_size, indices, grad_input);
}
at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_fractional_max_pool3d_backward_out(grad_output, self, kernel_size, output_size, indices, grad_input);
}
} // namespace cpu
} // namespace at
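// NOTE: Unlike the structured wrappers above, unstructured ops such as
// fractional_max_pool3d_backward forward straight to the at::native::
// implementation. The "No device check" / "DeviceGuard omitted" comments
// correspond (as far as this generated file shows) to ops that opt out of
// device checking and device-guard setup in native_functions.yaml, so the
// wrapper is a plain tail call. Calling through at::cpu:: therefore skips
// the dispatcher entirely, which can be useful in code that is statically
// known to be CPU-only -- illustrative, assuming the per-op dispatch header:
//
//   #include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
//   // at::cpu::fractional_max_pool3d_backward(
//   //     grad_output, self, kernel_size, output_size, indices);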
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool3d_with_indices_cpu(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_out_max_pool3d_with_indices_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool3d_with_indices_out_cpu(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("max_pool3d_with_indices",
TORCH_FN(wrapper_CPU__max_pool3d_with_indices));
m.impl("max_pool3d_with_indices.out",
TORCH_FN(wrapper_CPU_out_max_pool3d_with_indices_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU__max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU_out_max_pool3d_with_indices_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_out_max_pool3d_with_indices_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
} // namespace cpu
} // namespace at
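// NOTE: max_pool3d_with_indices is one of the ops in this file that returns a
// tuple: the pooled values plus the int64 indices of the selected elements,
// which the backward below consumes to scatter gradients. A minimal sketch,
// with made-up shapes (stride defaults to kernel_size when omitted):
//
//   at::Tensor self = at::randn({1, 3, 8, 8, 8});
//   auto [out, indices] = at::max_pool3d_with_indices(
//       self, /*kernel_size=*/{2, 2, 2});
//   at::Tensor gi = at::max_pool3d_with_indices_backward(
//       at::ones_like(out), self, {2, 2, 2}, /*stride=*/{2, 2, 2},
//       /*padding=*/{0, 0, 0}, /*dilation=*/{1, 1, 1},
//       /*ceil_mode=*/false, indices);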
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool3d_with_indices_backward_cpu(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool3d_with_indices_backward_out_cpu(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("max_pool3d_with_indices_backward",
TORCH_FN(wrapper_CPU__max_pool3d_with_indices_backward));
m.impl("max_pool3d_with_indices_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
return wrapper_CPU__max_pool3d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
return wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__max_unpool2d(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_unpooling2d_forward_cpu(self, indices, C10_AS_INTARRAYREF_SLOW(output_size));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_max_unpool2d_out(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_unpooling2d_forward_out_cpu(self, indices, C10_AS_INTARRAYREF_SLOW(output_size), out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("max_unpool2d",
TORCH_FN(wrapper_CPU__max_unpool2d));
m.impl("max_unpool2d.out",
TORCH_FN(wrapper_CPU_out_max_unpool2d_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
return wrapper_CPU__max_unpool2d(self, indices, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor max_unpool2d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
return wrapper_CPU__max_unpool2d(self, indices, output_size);
}
at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
return wrapper_CPU_out_max_unpool2d_out(self, indices, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_max_unpool2d_out(self, indices, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & max_unpool2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
return wrapper_CPU_out_max_unpool2d_out(self, indices, output_size, out);
}
at::Tensor & max_unpool2d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_max_unpool2d_out(self, indices, output_size, out);
}
} // namespace cpu
} // namespace at
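// NOTE: max_unpool2d is declared with SymInt sizes in its schema, so the
// at::cpu:: surface comes in pairs: the IntArrayRef overloads widen their
// sizes with c10::fromIntArrayRefSlow before reaching the wrapper, and the
// wrapper narrows back with C10_AS_INTARRAYREF_SLOW (which expects every
// SymInt to already be a concrete integer) before calling the kernel. Both
// paths end up in the same at::native:: implementation -- a sketch, with
// made-up shapes:
//
//   at::Tensor unpooled = at::cpu::max_unpool2d(self, indices, {8, 8});
//   std::vector<c10::SymInt> sym{c10::SymInt(8), c10::SymInt(8)};
//   at::Tensor unpooled2 = at::cpu::max_unpool2d_symint(self, indices, sym);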
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  // No device check
  // DeviceGuard omitted
  return at::native::reflection_pad2d_backward_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_reflection_pad2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
  // No device check
  // DeviceGuard omitted
  return at::native::reflection_pad2d_backward_out_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("reflection_pad2d_backward",
TORCH_FN(wrapper_CPU__reflection_pad2d_backward));
m.impl("reflection_pad2d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_reflection_pad2d_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU__reflection_pad2d_backward(grad_output, self, c10::fromIntArrayRefSlow(padding));
}
at::Tensor reflection_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU__reflection_pad2d_backward(grad_output, self, padding);
}
at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, padding, grad_input);
}
at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, padding, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_bilinear2d_out_cpu_functional final : public at::native::structured_upsample_bilinear2d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_bilinear2d_out_cpu_functional op;
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_bilinear2d_out_cpu_out final : public at::native::structured_upsample_bilinear2d_out_cpu {
    structured_upsample_bilinear2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_bilinear2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
structured_upsample_bilinear2d_out_cpu_out op(out);
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_bilinear2d", TORCH_FN(wrapper_CPU_upsample_bilinear2d));
m.impl("upsample_bilinear2d.out", TORCH_FN(wrapper_CPU_upsample_bilinear2d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bilinear2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bilinear2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bilinear2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bilinear2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bilinear2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
} // namespace cpu
} // namespace at
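// NOTE: The upsample ops above take the desired output_size plus optional
// explicit scale factors; when scales_h/scales_w are std::nullopt the scale
// is derived from the input and output sizes instead. A minimal sketch,
// with made-up shapes:
//
//   at::Tensor self = at::randn({1, 3, 8, 8});
//   at::Tensor up = at::upsample_bilinear2d(
//       self, /*output_size=*/{16, 16}, /*align_corners=*/false,
//       /*scales_h=*/std::nullopt, /*scales_w=*/std::nullopt);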
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_bilinear2d_backward_out_cpu_functional final : public at::native::structured_upsample_bilinear2d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_bilinear2d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_bilinear2d_backward_out_cpu_out final : public at::native::structured_upsample_bilinear2d_backward_out_cpu {
    structured_upsample_bilinear2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
structured_upsample_bilinear2d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_bilinear2d_backward", TORCH_FN(wrapper_CPU_upsample_bilinear2d_backward));
m.impl("upsample_bilinear2d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_bilinear2d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bilinear2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bilinear2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bilinear2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
} // namespace cpu
} // namespace at
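// NOTE: The backward above takes both output_size and input_size because the
// input shape cannot be recovered from grad_output alone (several input sizes
// can map to the same output size), so the forward's full input shape is
// threaded through; the symint overloads narrow both size lists before
// calling the kernel. A sketch, with made-up shapes (input_size is the full
// NCHW shape of the forward input):
//
//   at::Tensor gi = at::upsample_bilinear2d_backward(
//       grad_out, /*output_size=*/{16, 16}, /*input_size=*/{1, 3, 8, 8},
//       /*align_corners=*/false, std::nullopt, std::nullopt);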
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__upsample_bilinear2d_aa_out_cpu_functional final : public at::native::structured__upsample_bilinear2d_aa_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured__upsample_bilinear2d_aa_out_cpu_functional op;
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured__upsample_bilinear2d_aa_out_cpu_out final : public at::native::structured__upsample_bilinear2d_aa_out_cpu {
    structured__upsample_bilinear2d_aa_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__upsample_bilinear2d_aa_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
structured__upsample_bilinear2d_aa_out_cpu_out op(out);
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_upsample_bilinear2d_aa", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa));
m.impl("_upsample_bilinear2d_aa.out", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bilinear2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bilinear2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_trilinear3d_backward_out_cpu_functional final : public at::native::structured_upsample_trilinear3d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_trilinear3d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_trilinear3d_backward_out_cpu_out final : public at::native::structured_upsample_trilinear3d_backward_out_cpu {
    structured_upsample_trilinear3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
structured_upsample_trilinear3d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_trilinear3d_backward", TORCH_FN(wrapper_CPU_upsample_trilinear3d_backward));
m.impl("upsample_trilinear3d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_trilinear3d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_d, scales_h, scales_w);
}
at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_trilinear3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_trilinear3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__upsample_nearest_exact3d_out_cpu_functional final : public at::native::structured__upsample_nearest_exact3d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured__upsample_nearest_exact3d_out_cpu_functional op;
op.meta(self, output_size, scales_d, scales_h, scales_w);
op.impl(self, output_size, scales_d, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured__upsample_nearest_exact3d_out_cpu_out final : public at::native::structured__upsample_nearest_exact3d_out_cpu {
    structured__upsample_nearest_exact3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__upsample_nearest_exact3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
structured__upsample_nearest_exact3d_out_cpu_out op(out);
op.meta(self, output_size, scales_d, scales_h, scales_w);
op.impl(self, output_size, scales_d, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_upsample_nearest_exact3d", TORCH_FN(wrapper_CPU__upsample_nearest_exact3d));
m.impl("_upsample_nearest_exact3d.out", TORCH_FN(wrapper_CPU__upsample_nearest_exact3d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d(self, output_size, scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_nearest3d_backward_out_cpu_functional final : public at::native::structured_upsample_nearest3d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_nearest3d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, scales_d, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_nearest3d_backward_out_cpu_out final : public at::native::structured_upsample_nearest3d_backward_out_cpu {
    structured_upsample_nearest3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_nearest3d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
structured_upsample_nearest3d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, scales_d, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_nearest3d_backward", TORCH_FN(wrapper_CPU_upsample_nearest3d_backward));
m.impl("upsample_nearest3d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_nearest3d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w);
}
at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_logit_backward_out_functional final : public at::native::structured_logit_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
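// The "functional" functor above owns its outputs: set_output_* calls
// create_out to allocate a fresh tensor with the sizes, strides, and options
// computed by the op's meta() function, and maybe_get_output simply hands
// that tensor to the kernel. The "out" functor below writes into
// caller-provided storage instead.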
at::Tensor wrapper_CPU_logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps) {
structured_logit_backward_out_functional op;
op.meta(grad_output, self, eps);
op.impl(grad_output, self, eps, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_logit_backward_out_out final : public at::native::structured_logit_backward_out {
    structured_logit_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
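// In the out-variant functor, resize_out reshapes the caller's tensor in
// place; maybe_create_proxy then allocates a temporary only if the resized
// tensor still cannot carry the strides the meta function requested. When a
// proxy exists, the kernel writes into it and the wrapper below copies the
// result back into the user's tensor after impl() returns.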
at::Tensor & wrapper_CPU_logit_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & grad_input) {
structured_logit_backward_out_out op(grad_input);
op.meta(grad_output, self, eps);
op.impl(grad_output, self, eps, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("logit_backward", TORCH_FN(wrapper_CPU_logit_backward));
m.impl("logit_backward.grad_input", TORCH_FN(wrapper_CPU_logit_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps) {
return wrapper_CPU_logit_backward(grad_output, self, eps);
}
at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps) {
return wrapper_CPU_logit_backward_out_grad_input(grad_output, self, eps, grad_input);
}
at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & grad_input) {
return wrapper_CPU_logit_backward_out_grad_input(grad_output, self, eps, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_grad_input__slow_conv2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv2d_backward_out_cpu(grad_output, self, weight, C10_AS_INTARRAYREF_SLOW(kernel_size), C10_AS_INTARRAYREF_SLOW(stride), C10_AS_INTARRAYREF_SLOW(padding), grad_input, grad_weight, grad_bias);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_slow_conv2d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input__slow_conv2d_backward_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU_grad_input__slow_conv2d_backward_out(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
return wrapper_CPU_grad_input__slow_conv2d_backward_out(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CPU_grad_input__slow_conv2d_backward_out(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
return wrapper_CPU_grad_input__slow_conv2d_backward_out(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
}
} // namespace cpu
} // namespace at
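// Note on the SymInt plumbing above: unlike the structured functors earlier
// in this section, these wrappers dispatch straight to a hand-written
// at::native kernel. The plain int64_t overloads lift their IntArrayRef
// arguments into SymIntArrayRef via c10::fromIntArrayRefSlow, and the wrapper
// immediately lowers them again with C10_AS_INTARRAYREF_SLOW before calling
// the CPU kernel; the round-trip lets one SymInt-typed wrapper serve both
// symbolic and plain-integer callers.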
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_output_mask__slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv2d_backward_cpu(grad_output, self, weight, C10_AS_INTARRAYREF_SLOW(kernel_size), C10_AS_INTARRAYREF_SLOW(stride), C10_AS_INTARRAYREF_SLOW(padding), output_mask);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_slow_conv2d_backward.output_mask",
TORCH_FN(wrapper_CPU_output_mask__slow_conv2d_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
return wrapper_CPU_output_mask__slow_conv2d_backward(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
return wrapper_CPU_output_mask__slow_conv2d_backward(grad_output, self, weight, kernel_size, stride, padding, output_mask);
}
} // namespace cpu
} // namespace at
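// Usage sketch for the output_mask overload (comment-only illustration;
// shapes are hypothetical). Each mask entry requests one gradient, in the
// order {grad_input, grad_weight, grad_bias}; a masked-out slot comes back as
// an undefined tensor:
//
//   at::Tensor input       = at::randn({1, 2, 8, 8});
//   at::Tensor weight      = at::randn({4, 2, 3, 3});
//   at::Tensor grad_output = at::randn({1, 4, 6, 6});
//   auto [gi, gw, gb] = at::cpu::_slow_conv2d_backward(
//       grad_output, input, weight, /*kernel_size=*/{3, 3},
//       /*stride=*/{1, 1}, /*padding=*/{0, 0},
//       /*output_mask=*/{{true, true, false}});  // gb left undefined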
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv3d_forward_cpu(self, weight, C10_AS_INTARRAYREF_SLOW(kernel_size), bias, C10_AS_INTARRAYREF_SLOW(stride), C10_AS_INTARRAYREF_SLOW(padding));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_output_slow_conv3d_forward_out(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv3d_forward_out_cpu(self, weight, C10_AS_INTARRAYREF_SLOW(kernel_size), bias, C10_AS_INTARRAYREF_SLOW(stride), C10_AS_INTARRAYREF_SLOW(padding), output);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("slow_conv3d_forward",
TORCH_FN(wrapper_CPU__slow_conv3d_forward));
m.impl("slow_conv3d_forward.output",
TORCH_FN(wrapper_CPU_output_slow_conv3d_forward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU__slow_conv3d_forward(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding));
}
at::Tensor slow_conv3d_forward_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CPU__slow_conv3d_forward(self, weight, kernel_size, bias, stride, padding);
}
at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
}
at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
}
at::Tensor & slow_conv3d_forward_symint_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, kernel_size, bias, stride, padding, output);
}
at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, kernel_size, bias, stride, padding, output);
}
} // namespace cpu
} // namespace at
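// Illustrative call for the forward wrappers above (comment-only sketch;
// shapes are hypothetical). The bias parameter is a ::std::optional<Tensor>,
// so a bias-free convolution passes an empty optional:
//
//   at::Tensor input  = at::randn({1, 2, 8, 8, 8});
//   at::Tensor weight = at::randn({4, 2, 3, 3, 3});
//   at::Tensor out = at::cpu::slow_conv3d_forward(
//       input, weight, /*kernel_size=*/{3, 3, 3}, /*bias=*/std::nullopt,
//       /*stride=*/{1, 1, 1}, /*padding=*/{0, 0, 0});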
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__slow_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv_dilated3d_cpu(self, weight, C10_AS_INTARRAYREF_SLOW(kernel_size), bias, C10_AS_INTARRAYREF_SLOW(stride), C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(dilation));
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("slow_conv_dilated3d",
TORCH_FN(wrapper_CPU__slow_conv_dilated3d));
}
} // anonymous namespace
namespace cpu {
at::Tensor slow_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_dilated3d(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation));
}
at::Tensor slow_conv_dilated3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
return wrapper_CPU__slow_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_special_log_ndtr_out_functional final : public at::native::structured_special_log_ndtr_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_log_ndtr(const at::Tensor & self) {
structured_special_log_ndtr_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_log_ndtr_out_out final : public at::native::structured_special_log_ndtr_out {
    structured_special_log_ndtr_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_log_ndtr_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_log_ndtr_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_log_ndtr", TORCH_FN(wrapper_CPU_special_log_ndtr));
m.impl("special_log_ndtr.out", TORCH_FN(wrapper_CPU_special_log_ndtr_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_log_ndtr(const at::Tensor & self) {
return wrapper_CPU_special_log_ndtr(self);
}
at::Tensor & special_log_ndtr_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_log_ndtr_out_out(self, out);
}
at::Tensor & special_log_ndtr_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_log_ndtr_out_out(self, out);
}
} // namespace cpu
} // namespace at
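// The special_* sections that follow repeat the structured pattern used for
// special_log_ndtr above: a functional functor that allocates its result, an
// out functor that reuses (and, on a strides mismatch, proxies) the caller's
// tensor, and at::cpu:: entry points that differ only in argument order.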
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_special_erfcx_out_functional final : public at::native::structured_special_erfcx_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_erfcx(const at::Tensor & self) {
structured_special_erfcx_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_erfcx_out_out final : public at::native::structured_special_erfcx_out {
    structured_special_erfcx_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_erfcx_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_erfcx_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_erfcx", TORCH_FN(wrapper_CPU_special_erfcx));
m.impl("special_erfcx.out", TORCH_FN(wrapper_CPU_special_erfcx_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_erfcx(const at::Tensor & self) {
return wrapper_CPU_special_erfcx(self);
}
at::Tensor & special_erfcx_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_erfcx_out_out(self, out);
}
at::Tensor & special_erfcx_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_erfcx_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_special_xlog1py_out_functional final : public at::native::structured_special_xlog1py_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_xlog1py(const at::Tensor & self, const at::Tensor & other) {
structured_special_xlog1py_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_xlog1py_out_out final : public at::native::structured_special_xlog1py_out {
    structured_special_xlog1py_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_xlog1py_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_special_xlog1py_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_xlog1py", TORCH_FN(wrapper_CPU_special_xlog1py));
m.impl("special_xlog1py.out", TORCH_FN(wrapper_CPU_special_xlog1py_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_xlog1py(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_xlog1py(self, other);
}
at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_xlog1py_out_out(self, other, out);
}
at::Tensor & special_xlog1py_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_special_xlog1py_out_out(self, other, out);
}
} // namespace cpu
} // namespace at
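// special_xlog1py is a binary structured op whose meta() builds a
// broadcasting TensorIterator, so the two inputs follow the usual
// broadcasting rules (comment-only sketch, shapes hypothetical):
//
//   at::Tensor x = at::randn({3, 1});
//   at::Tensor y = at::randn({1, 4});
//   at::Tensor r = at::cpu::special_xlog1py(x, y);  // r has shape {3, 4}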
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_special_i1_out_functional final : public at::native::structured_special_i1_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_i1(const at::Tensor & self) {
structured_special_i1_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_i1_out_out final : public at::native::structured_special_i1_out {
    structured_special_i1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_i1_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_i1_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_i1", TORCH_FN(wrapper_CPU_special_i1));
m.impl("special_i1.out", TORCH_FN(wrapper_CPU_special_i1_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_i1(const at::Tensor & self) {
return wrapper_CPU_special_i1(self);
}
at::Tensor & special_i1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_i1_out_out(self, out);
}
at::Tensor & special_i1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_i1_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_special_i1e_out_functional final : public at::native::structured_special_i1e_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_i1e(const at::Tensor & self) {
structured_special_i1e_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_i1e_out_out final : public at::native::structured_special_i1e_out {
    structured_special_i1e_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_i1e_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_i1e_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_i1e", TORCH_FN(wrapper_CPU_special_i1e));
m.impl("special_i1e.out", TORCH_FN(wrapper_CPU_special_i1e_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_i1e(const at::Tensor & self) {
return wrapper_CPU_special_i1e(self);
}
at::Tensor & special_i1e_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_i1e_out_out(self, out);
}
at::Tensor & special_i1e_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_i1e_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_linalg_ldl_factor_ex_out_functional final : public at::native::structured_linalg_ldl_factor_ex_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 3> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian, bool check_errors) {
structured_linalg_ldl_factor_ex_out_functional op;
op.meta(self, hermitian, check_errors);
op.impl(self, hermitian, check_errors, op.outputs_[0], op.outputs_[1], op.outputs_[2]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]));
}
struct structured_linalg_ldl_factor_ex_out_out final : public at::native::structured_linalg_ldl_factor_ex_out {
    structured_linalg_ldl_factor_ex_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 3> outputs_;
    std::array<::std::optional<Tensor>, 3> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_linalg_ldl_factor_ex_out_out(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
structured_linalg_ldl_factor_ex_out_out op(LD, pivots, info);
op.meta(self, hermitian, check_errors);
op.impl(self, hermitian, check_errors, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
return std::forward_as_tuple(LD, pivots, info);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_ldl_factor_ex", TORCH_FN(wrapper_CPU_linalg_ldl_factor_ex));
m.impl("linalg_ldl_factor_ex.out", TORCH_FN(wrapper_CPU_linalg_ldl_factor_ex_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian, bool check_errors) {
return wrapper_CPU_linalg_ldl_factor_ex(self, hermitian, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian, bool check_errors) {
return wrapper_CPU_linalg_ldl_factor_ex_out_out(self, hermitian, check_errors, LD, pivots, info);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
return wrapper_CPU_linalg_ldl_factor_ex_out_out(self, hermitian, check_errors, LD, pivots, info);
}
} // namespace cpu
} // namespace at
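// Usage sketch for the factorization above (comment-only illustration). With
// check_errors=false the kernel does not throw on failure; a nonzero entry in
// info flags the batch element whose factorization broke down:
//
//   at::Tensor A = at::randn({4, 4});
//   A = A + A.mT();  // symmetrize the input
//   auto [LD, pivots, info] = at::cpu::linalg_ldl_factor_ex(
//       A, /*hermitian=*/false, /*check_errors=*/false);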
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_out_linalg_lstsq_out(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_lstsq_out(self, b, rcond, driver, solution, residuals, rank, singular_values);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_lstsq.out",
TORCH_FN(wrapper_CPU_out_linalg_lstsq_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver) {
return wrapper_CPU_out_linalg_lstsq_out(self, b, rcond, driver, solution, residuals, rank, singular_values);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
return wrapper_CPU_out_linalg_lstsq_out(self, b, rcond, driver, solution, residuals, rank, singular_values);
}
} // namespace cpu
} // namespace at
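// Only the out overload of linalg_lstsq is registered for CPU here; all four
// outputs must be supplied, and driver optionally names a LAPACK driver such
// as "gelsd" (comment-only sketch, shapes hypothetical):
//
//   at::Tensor a = at::randn({5, 3}), b = at::randn({5, 2});
//   at::Tensor sol = at::empty({0}), res = at::empty({0});
//   at::Tensor rank = at::empty({0}, at::kLong), sv = at::empty({0});
//   at::cpu::linalg_lstsq_outf(a, b, /*rcond=*/std::nullopt,
//       /*driver=*/std::nullopt, sol, res, rank, sv);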
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__linalg_matrix_exp(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_exp(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_matrix_exp",
TORCH_FN(wrapper_CPU__linalg_matrix_exp));
}
} // anonymous namespace
namespace cpu {
at::Tensor linalg_matrix_exp(const at::Tensor & self) {
return wrapper_CPU__linalg_matrix_exp(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured__linalg_eigh_out_functional final : public at::native::structured__linalg_eigh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__linalg_eigh(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
structured__linalg_eigh_out_functional op;
op.meta(A, UPLO, compute_v);
op.impl(A, UPLO, compute_v, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
struct structured__linalg_eigh_out_out final : public at::native::structured__linalg_eigh_out {
    structured__linalg_eigh_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: this structured op is not TensorIterator-backed,
        // so its parent class has no set_output_raw_strided of its own to run
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU__linalg_eigh_out_eigenvalues(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
structured__linalg_eigh_out_out op(eigenvalues, eigenvectors);
op.meta(A, UPLO, compute_v);
op.impl(A, UPLO, compute_v, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(eigenvalues, eigenvectors);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_linalg_eigh", TORCH_FN(wrapper_CPU__linalg_eigh));
m.impl("_linalg_eigh.eigenvalues", TORCH_FN(wrapper_CPU__linalg_eigh_out_eigenvalues));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
return wrapper_CPU__linalg_eigh(A, UPLO, compute_v);
}
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
return wrapper_CPU__linalg_eigh_out_eigenvalues(A, UPLO, compute_v, eigenvalues, eigenvectors);
}
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_outf(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
return wrapper_CPU__linalg_eigh_out_eigenvalues(A, UPLO, compute_v, eigenvalues, eigenvectors);
}
} // namespace cpu
} // namespace at
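// _linalg_eigh is the internal helper behind the public linalg_eigh /
// linalg_eigvalsh APIs: UPLO ("L" or "U") selects which triangle of A is
// read, and compute_v=false computes eigenvalues only, leaving the
// eigenvector output empty.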
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type) {
  // No device check
  // DeviceGuard omitted
  return at::native::transformer_encoder_layer_forward(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_transformer_encoder_layer_fwd",
TORCH_FN(wrapper_CPU___transformer_encoder_layer_fwd));
}
} // anonymous namespace
namespace cpu {
at::Tensor _transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type) {
return wrapper_CPU___transformer_encoder_layer_fwd(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
}
} // namespace cpu
} // namespace at
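// _transformer_encoder_layer_fwd is the fused fast path that
// nn.TransformerEncoderLayer can take on inference-friendly inputs; every
// weight and bias of the layer is passed in explicitly, so the op itself is
// stateless.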
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___native_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type) {
  // No device check
  // DeviceGuard omitted
  return at::native::native_multi_head_attention_cpu(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_native_multi_head_attention",
TORCH_FN(wrapper_CPU___native_multi_head_attention));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type) {
return wrapper_CPU___native_multi_head_attention(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
int64_t wrapper_CPU___fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fused_sdp_choice_cpp(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fused_sdp_choice",
TORCH_FN(wrapper_CPU___fused_sdp_choice));
}
} // anonymous namespace
namespace cpu {
int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {
return wrapper_CPU___fused_sdp_choice(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}
} // namespace cpu
} // namespace at
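// Illustrative sketch (not generated code): _fused_sdp_choice returns an
// integer backend id (an sdp::SDPBackend value) telling
// scaled_dot_product_attention which fused kernel to run; on CPU the decision
// is delegated to at::native::_fused_sdp_choice_cpp above. Hypothetical call:
//
//   int64_t backend = at::cpu::_fused_sdp_choice(
//       query, key, value, /*attn_mask=*/::std::nullopt,
//       /*dropout_p=*/0.0, /*is_causal=*/false,
//       /*scale=*/::std::nullopt, /*enable_gqa=*/false);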
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_special_modified_bessel_i1_out_functional final : public at::native::structured_special_modified_bessel_i1_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_modified_bessel_i1(const at::Tensor & self) {
  structured_special_modified_bessel_i1_out_functional op;
  op.meta(self);                  // shape/dtype inference; allocates outputs_[0] via set_output_*
  op.impl(self, op.outputs_[0]);  // kernel writes into the freshly created output
  return std::move(op.outputs_[0]);
}
struct structured_special_modified_bessel_i1_out_out final : public at::native::structured_special_modified_bessel_i1_out {
    structured_special_modified_bessel_i1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_modified_bessel_i1_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_modified_bessel_i1_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_modified_bessel_i1", TORCH_FN(wrapper_CPU_special_modified_bessel_i1));
m.impl("special_modified_bessel_i1.out", TORCH_FN(wrapper_CPU_special_modified_bessel_i1_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_modified_bessel_i1(const at::Tensor & self) {
  return wrapper_CPU_special_modified_bessel_i1(self);
}
at::Tensor & special_modified_bessel_i1_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_CPU_special_modified_bessel_i1_out_out(self, out);
}
at::Tensor & special_modified_bessel_i1_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_CPU_special_modified_bessel_i1_out_out(self, out);
}
} // namespace cpu
} // namespace at
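// Illustrative sketch (not generated code): the three at::cpu:: entry points
// above all drive the same structured kernel. The functional form allocates
// its result; _out and _outf both write into a caller-provided tensor and
// differ only in argument order. Hypothetical usage:
//
//   at::Tensor x = at::rand({8});
//   at::Tensor y = at::cpu::special_modified_bessel_i1(x);    // allocates y
//   at::Tensor out = at::empty_like(x);
//   at::cpu::special_modified_bessel_i1_out(out, x);          // out first
//   at::cpu::special_modified_bessel_i1_outf(x, out);         // out last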
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
struct structured_special_modified_bessel_k0_out_functional final : public at::native::structured_special_modified_bessel_k0_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_modified_bessel_k0(const at::Tensor & self) {
  structured_special_modified_bessel_k0_out_functional op;
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  return std::move(op.outputs_[0]);
}
struct structured_special_modified_bessel_k0_out_out final : public at::native::structured_special_modified_bessel_k0_out {
    structured_special_modified_bessel_k0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_modified_bessel_k0_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_modified_bessel_k0_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_modified_bessel_k0", TORCH_FN(wrapper_CPU_special_modified_bessel_k0));
m.impl("special_modified_bessel_k0.out", TORCH_FN(wrapper_CPU_special_modified_bessel_k0_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_modified_bessel_k0(const at::Tensor & self) {
  return wrapper_CPU_special_modified_bessel_k0(self);
}
at::Tensor & special_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_CPU_special_modified_bessel_k0_out_out(self, out);
}
at::Tensor & special_modified_bessel_k0_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_CPU_special_modified_bessel_k0_out_out(self, out);
}
} // namespace cpu
} // namespace at
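// Illustrative note (not generated code): in the out= wrapper above,
// maybe_create_proxy allocates a correctly-strided temporary whenever the
// caller's `out` cannot take the strides the kernel requires; the kernel then
// writes into the proxy, and the final copy_ moves the result back into
// `out`. Minimal hypothetical call:
//
//   at::Tensor x = at::rand({8});
//   at::Tensor out = at::empty({8});
//   at::cpu::special_modified_bessel_k0_out(out, x);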
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___foobar(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
  // No device check
  // DeviceGuard omitted
  return at::native::foobar(self, arg1, arg2, arg3);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_foobar",
TORCH_FN(wrapper_CPU___foobar));
}
} // anonymous namespace
namespace cpu {
at::Tensor _foobar(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
  return wrapper_CPU___foobar(self, arg1, arg2, arg3);
}
} // namespace cpu
} // namespace at
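// Illustrative note (not generated code): _foobar is a dummy operator kept in
// native_functions.yaml for exercising the codegen and dispatcher, so calling
// it is only meaningful in tests. Hypothetical usage:
//
//   at::Tensor y = at::cpu::_foobar(x, /*arg1=*/true, /*arg2=*/true,
//                                   /*arg3=*/true);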
