// Required for old g++ versions to compile PRId64 macros; see
// https://github.com/pytorch/pytorch/issues/3571
// for context.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
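
// Context for the guard above: on some older g++/glibc toolchains the
// PRI*/SCN* format macros are only exposed when __STDC_FORMAT_MACROS is
// defined before <cinttypes>/<inttypes.h> is included. A minimal sketch
// (illustrative only, not part of this file):
//
//   #include <cinttypes>
//   #include <cstdio>
//   std::int64_t v = 42;
//   std::printf("value: %" PRId64 "\n", v);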

// An external backend might generate this file within its own code tree
// and check all of the source files in that tree with clang-format,
// so disable clang-format here since the backend might use a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries; it
//       just excludes external projects such as torch_xla, which
//       re-use some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB)        || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB)    || \
    defined(TORCH_HIP_BUILD_MAIN_LIB)     || \
    defined(TORCH_XPU_BUILD_MAIN_LIB)     || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif
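
// With TORCH_ASSERT_ONLY_METHOD_OPERATORS defined, this translation unit
// is expected to depend only on the narrow per-operator headers included
// below rather than on a monolithic header that declares every operator;
// pulling in the full <ATen/Functions.h> here would then fail to compile.
// A minimal sketch of the intended include pattern (illustrative only):
//
//   #include <ATen/ops/add_cpu_dispatch.h>  // declares at::cpu::add(...)
//   #include <ATen/ops/add_native.h>        // declares at::native add kernels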

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <optional>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/ops/as_strided_native.h>
#include <ATen/EmptyTensor.h>
#include <c10/macros/Macros.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_add_relu_cpu_dispatch.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_addmm_activation_cpu_dispatch.h>
#include <ATen/ops/_addmm_activation_native.h>
#include <ATen/ops/_aminmax_cpu_dispatch.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cpu_dispatch.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_update_scale_cpu_dispatch.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_assert_async_cpu_dispatch.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_batch_norm_with_update_cpu_dispatch.h>
#include <ATen/ops/_batch_norm_with_update_native.h>
#include <ATen/ops/_cdist_backward_cpu_dispatch.h>
#include <ATen/ops/_cdist_backward_native.h>
#include <ATen/ops/_cdist_forward_cpu_dispatch.h>
#include <ATen/ops/_cdist_forward_native.h>
#include <ATen/ops/_cholesky_solve_helper_cpu_dispatch.h>
#include <ATen/ops/_cholesky_solve_helper_native.h>
#include <ATen/ops/_compute_linear_combination_cpu_dispatch.h>
#include <ATen/ops/_compute_linear_combination_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_cpu_dispatch.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_native.h>
#include <ATen/ops/_ctc_loss_backward_cpu_dispatch.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_ctc_loss_cpu_dispatch.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_cummax_helper_cpu_dispatch.h>
#include <ATen/ops/_cummax_helper_native.h>
#include <ATen/ops/_cummin_helper_cpu_dispatch.h>
#include <ATen/ops/_cummin_helper_native.h>
#include <ATen/ops/_dirichlet_grad_cpu_dispatch.h>
#include <ATen/ops/_dirichlet_grad_native.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_cpu_dispatch.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_native.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_cpu_dispatch.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_native.h>
#include <ATen/ops/_efficientzerotensor_cpu_dispatch.h>
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/_embedding_bag_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_backward_native.h>
#include <ATen/ops/_embedding_bag_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_dense_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#include <ATen/ops/_empty_affine_quantized_cpu_dispatch.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
#include <ATen/ops/_fft_c2c_cpu_dispatch.h>
#include <ATen/ops/_fft_c2c_native.h>
#include <ATen/ops/_fft_c2r_cpu_dispatch.h>
#include <ATen/ops/_fft_c2r_native.h>
#include <ATen/ops/_fft_r2c_cpu_dispatch.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_foobar_cpu_dispatch.h>
#include <ATen/ops/_foobar_native.h>
#include <ATen/ops/_functional_assert_async_cpu_dispatch.h>
#include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_fused_adagrad_cpu_dispatch.h>
#include <ATen/ops/_fused_adagrad_native.h>
#include <ATen/ops/_fused_adam_cpu_dispatch.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adamw_cpu_dispatch.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_cpu_dispatch.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_sdp_choice_cpu_dispatch.h>
#include <ATen/ops/_fused_sdp_choice_native.h>
#include <ATen/ops/_fused_sgd_cpu_dispatch.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_histogramdd_bin_edges_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_index_put_impl_cpu_dispatch.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_int_mm_cpu_dispatch.h>
#include <ATen/ops/_int_mm_native.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_cpu_dispatch.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_native.h>
#include <ATen/ops/_linalg_det_cpu_dispatch.h>
#include <ATen/ops/_linalg_det_native.h>
#include <ATen/ops/_linalg_eigh_cpu_dispatch.h>
#include <ATen/ops/_linalg_eigh_native.h>
#include <ATen/ops/_linalg_eigvals_cpu_dispatch.h>
#include <ATen/ops/_linalg_eigvals_native.h>
#include <ATen/ops/_linalg_slogdet_cpu_dispatch.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_solve_ex_cpu_dispatch.h>
#include <ATen/ops/_linalg_solve_ex_native.h>
#include <ATen/ops/_linalg_svd_cpu_dispatch.h>
#include <ATen/ops/_linalg_svd_native.h>
#include <ATen/ops/_local_scalar_dense_cpu_dispatch.h>
#include <ATen/ops/_local_scalar_dense_native.h>
#include <ATen/ops/_log_softmax_backward_data_cpu_dispatch.h>
#include <ATen/ops/_log_softmax_backward_data_native.h>
#include <ATen/ops/_log_softmax_cpu_dispatch.h>
#include <ATen/ops/_log_softmax_native.h>
#include <ATen/ops/_logcumsumexp_cpu_dispatch.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_make_dep_token_cpu_dispatch.h>
#include <ATen/ops/_make_dep_token_native.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/_masked_softmax_backward_cpu_dispatch.h>
#include <ATen/ops/_masked_softmax_backward_native.h>
#include <ATen/ops/_masked_softmax_cpu_dispatch.h>
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_native_batch_norm_legit_cpu_dispatch.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_multi_head_attention_cpu_dispatch.h>
#include <ATen/ops/_native_multi_head_attention_native.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_cpu_dispatch.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_native.h>
#include <ATen/ops/_nested_from_padded_cpu_dispatch.h>
#include <ATen/ops/_nested_from_padded_native.h>
#include <ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_cpu_dispatch.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_view_from_buffer_cpu_dispatch.h>
#include <ATen/ops/_nested_view_from_buffer_native.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_cpu_dispatch.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_native.h>
#include <ATen/ops/_pdist_backward_cpu_dispatch.h>
#include <ATen/ops/_pdist_backward_native.h>
#include <ATen/ops/_pdist_forward_cpu_dispatch.h>
#include <ATen/ops/_pdist_forward_native.h>
#include <ATen/ops/_prelu_kernel_backward_cpu_dispatch.h>
#include <ATen/ops/_prelu_kernel_backward_native.h>
#include <ATen/ops/_prelu_kernel_cpu_dispatch.h>
#include <ATen/ops/_prelu_kernel_native.h>
#include <ATen/ops/_reshape_alias_cpu_dispatch.h>
#include <ATen/ops/_reshape_alias_native.h>
#include <ATen/ops/_sample_dirichlet_cpu_dispatch.h>
#include <ATen/ops/_sample_dirichlet_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_cpu_dispatch.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h>
#include <ATen/ops/_segment_reduce_backward_cpu_dispatch.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_slow_conv2d_backward_cpu_dispatch.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_forward_cpu_dispatch.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_softmax_backward_data_cpu_dispatch.h>
#include <ATen/ops/_softmax_backward_data_native.h>
#include <ATen/ops/_softmax_cpu_dispatch.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_spdiags_cpu_dispatch.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_stack_cpu_dispatch.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_standard_gamma_cpu_dispatch.h>
#include <ATen/ops/_standard_gamma_grad_cpu_dispatch.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_standard_gamma_native.h>
#include <ATen/ops/_test_functorch_fallback_cpu_dispatch.h>
#include <ATen/ops/_test_functorch_fallback_native.h>
#include <ATen/ops/_test_optional_filled_intlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_filled_intlist_native.h>
#include <ATen/ops/_test_optional_floatlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_intlist_cpu_dispatch.h>
#include <ATen/ops/_test_optional_intlist_native.h>
#include <ATen/ops/_to_sparse_bsc_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsr_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csc_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csr_cpu_dispatch.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h>
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
#include <ATen/ops/_unique2_cpu_dispatch.h>
#include <ATen/ops/_unique2_native.h>
#include <ATen/ops/_unique_cpu_dispatch.h>
#include <ATen/ops/_unique_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_cpu_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h>
#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_cpu_dispatch.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_native.h>
#include <ATen/ops/_weight_int8pack_mm_cpu_dispatch.h>
#include <ATen/ops/_weight_int8pack_mm_native.h>
#include <ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h>
#include <ATen/ops/_weight_norm_interface_backward_native.h>
#include <ATen/ops/_weight_norm_interface_cpu_dispatch.h>
#include <ATen/ops/_weight_norm_interface_native.h>
#include <ATen/ops/abs_cpu_dispatch.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/acos_cpu_dispatch.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acosh_cpu_dispatch.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_cpu_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_native.h>
#include <ATen/ops/add_cpu_dispatch.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addbmm_cpu_dispatch.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addcdiv_cpu_dispatch.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcmul_cpu_dispatch.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addmm_cpu_dispatch.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmv_cpu_dispatch.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addr_cpu_dispatch.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/all_cpu_dispatch.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/amax_cpu_dispatch.h>
#include <ATen/ops/amax_native.h>
#include <ATen/ops/amin_cpu_dispatch.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/aminmax_cpu_dispatch.h>
#include <ATen/ops/aminmax_native.h>
#include <ATen/ops/angle_cpu_dispatch.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/any_cpu_dispatch.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/arange_cpu_dispatch.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/argmax_cpu_dispatch.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmin_cpu_dispatch.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/as_strided_cpu_dispatch.h>
#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/asin_cpu_dispatch.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh_cpu_dispatch.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan2_cpu_dispatch.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan_cpu_dispatch.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atanh_cpu_dispatch.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/avg_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/avg_pool2d_backward_native.h>
#include <ATen/ops/avg_pool2d_cpu_dispatch.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/avg_pool3d_cpu_dispatch.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/baddbmm_cpu_dispatch.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/batch_norm_backward_cpu_dispatch.h>
#include <ATen/ops/batch_norm_backward_native.h>
#include <ATen/ops/batch_norm_update_stats_cpu_dispatch.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/bernoulli_cpu_dispatch.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/binary_cross_entropy_backward_cpu_dispatch.h>
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_cpu_dispatch.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/bincount_cpu_dispatch.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/binomial_cpu_dispatch.h>
#include <ATen/ops/binomial_native.h>
#include <ATen/ops/bitwise_and_cpu_dispatch.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_left_shift_cpu_dispatch.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_not_cpu_dispatch.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_or_cpu_dispatch.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_right_shift_cpu_dispatch.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_xor_cpu_dispatch.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bmm_cpu_dispatch.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/bucketize_cpu_dispatch.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/cat_cpu_dispatch.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cauchy_cpu_dispatch.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/ceil_cpu_dispatch.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/channel_shuffle_cpu_dispatch.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/cholesky_cpu_dispatch.h>
#include <ATen/ops/cholesky_inverse_cpu_dispatch.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_native.h>
#include <ATen/ops/clamp_cpu_dispatch.h>
#include <ATen/ops/clamp_max_cpu_dispatch.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_min_cpu_dispatch.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/col2im_cpu_dispatch.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/complex_cpu_dispatch.h>
#include <ATen/ops/complex_native.h>
#include <ATen/ops/conj_physical_cpu_dispatch.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/copysign_cpu_dispatch.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/cos_cpu_dispatch.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cosh_cpu_dispatch.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/count_nonzero_cpu_dispatch.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/cumprod_cpu_dispatch.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumsum_cpu_dispatch.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/dequantize_cpu_dispatch.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/digamma_cpu_dispatch.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/div_cpu_dispatch.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/dot_cpu_dispatch.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/elu_backward_cpu_dispatch.h>
#include <ATen/ops/elu_backward_native.h>
#include <ATen/ops/elu_cpu_dispatch.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/embedding_dense_backward_cpu_dispatch.h>
#include <ATen/ops/embedding_dense_backward_native.h>
#include <ATen/ops/embedding_renorm_cpu_dispatch.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/empty_cpu_dispatch.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_strided_cpu_dispatch.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/eq_cpu_dispatch.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/equal_cpu_dispatch.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/erf_cpu_dispatch.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfc_cpu_dispatch.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfinv_cpu_dispatch.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/exp2_cpu_dispatch.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/exp_cpu_dispatch.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/expm1_cpu_dispatch.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/exponential_cpu_dispatch.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/eye_cpu_dispatch.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cpu_dispatch.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
#include <ATen/ops/fill_cpu_dispatch.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/flip_cpu_dispatch.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/floor_cpu_dispatch.h>
#include <ATen/ops/floor_divide_cpu_dispatch.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/fmax_cpu_dispatch.h>
#include <ATen/ops/fmax_native.h>
#include <ATen/ops/fmin_cpu_dispatch.h>
#include <ATen/ops/fmin_native.h>
#include <ATen/ops/fmod_cpu_dispatch.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/frac_cpu_dispatch.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
#include <ATen/ops/fractional_max_pool3d_cpu_dispatch.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/frexp_cpu_dispatch.h>
#include <ATen/ops/frexp_native.h>
#include <ATen/ops/from_file_cpu_dispatch.h>
#include <ATen/ops/from_file_native.h>
#include <ATen/ops/gather_cpu_dispatch.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gcd_cpu_dispatch.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/ge_cpu_dispatch.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/gelu_backward_cpu_dispatch.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/gelu_cpu_dispatch.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/geometric_cpu_dispatch.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geqrf_cpu_dispatch.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/glu_backward_cpu_dispatch.h>
#include <ATen/ops/glu_backward_jvp_cpu_dispatch.h>
#include <ATen/ops/glu_backward_jvp_native.h>
#include <ATen/ops/glu_backward_native.h>
#include <ATen/ops/glu_cpu_dispatch.h>
#include <ATen/ops/glu_jvp_cpu_dispatch.h>
#include <ATen/ops/glu_jvp_native.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_2d_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_3d_backward_native.h>
#include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>
#include <ATen/ops/grid_sampler_3d_native.h>
#include <ATen/ops/gt_cpu_dispatch.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/hardshrink_backward_cpu_dispatch.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardshrink_cpu_dispatch.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardsigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/hardsigmoid_backward_native.h>
#include <ATen/ops/hardsigmoid_cpu_dispatch.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardswish_backward_cpu_dispatch.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardswish_cpu_dispatch.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardtanh_backward_cpu_dispatch.h>
#include <ATen/ops/hardtanh_backward_native.h>
#include <ATen/ops/hardtanh_cpu_dispatch.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/heaviside_cpu_dispatch.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/histc_cpu_dispatch.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histogram_cpu_dispatch.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/huber_loss_backward_cpu_dispatch.h>
#include <ATen/ops/huber_loss_backward_native.h>
#include <ATen/ops/huber_loss_cpu_dispatch.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/hypot_cpu_dispatch.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/i0_cpu_dispatch.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/igamma_cpu_dispatch.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igammac_cpu_dispatch.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/im2col_cpu_dispatch.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/index_add_cpu_dispatch.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_copy_cpu_dispatch.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_cpu_dispatch.h>
#include <ATen/ops/index_fill_cpu_dispatch.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_reduce_cpu_dispatch.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_select_cpu_dispatch.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/is_set_to_cpu_dispatch.h>
#include <ATen/ops/is_set_to_native.h>
#include <ATen/ops/isin_cpu_dispatch.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isnan_cpu_dispatch.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf_cpu_dispatch.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf_cpu_dispatch.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/kthvalue_cpu_dispatch.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/lcm_cpu_dispatch.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/le_cpu_dispatch.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/leaky_relu_backward_cpu_dispatch.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/leaky_relu_cpu_dispatch.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/lerp_cpu_dispatch.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lgamma_cpu_dispatch.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/linalg_cholesky_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cross_cpu_dispatch.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_eig_cpu_dispatch.h>
#include <ATen/ops/linalg_eig_native.h>
#include <ATen/ops/linalg_eigvals_cpu_dispatch.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_householder_product_cpu_dispatch.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_inv_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
#include <ATen/ops/linalg_ldl_solve_cpu_dispatch.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_lstsq_cpu_dispatch.h>
#include <ATen/ops/linalg_lstsq_native.h>
#include <ATen/ops/linalg_lu_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_native.h>
#include <ATen/ops/linalg_lu_solve_cpu_dispatch.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_matrix_exp_cpu_dispatch.h>
#include <ATen/ops/linalg_matrix_exp_native.h>
#include <ATen/ops/linalg_qr_cpu_dispatch.h>
#include <ATen/ops/linalg_qr_native.h>
#include <ATen/ops/linalg_solve_triangular_cpu_dispatch.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_vector_norm_cpu_dispatch.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linspace_cpu_dispatch.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/log10_cpu_dispatch.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log1p_cpu_dispatch.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log2_cpu_dispatch.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log_cpu_dispatch.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log_normal_cpu_dispatch.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_sigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/log_sigmoid_backward_native.h>
#include <ATen/ops/log_sigmoid_forward_cpu_dispatch.h>
#include <ATen/ops/log_sigmoid_forward_native.h>
#include <ATen/ops/logaddexp2_cpu_dispatch.h>
#include <ATen/ops/logaddexp2_native.h>
#include <ATen/ops/logaddexp_cpu_dispatch.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logical_and_cpu_dispatch.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_not_cpu_dispatch.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_or_cpu_dispatch.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_xor_cpu_dispatch.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logit_backward_cpu_dispatch.h>
#include <ATen/ops/logit_backward_native.h>
#include <ATen/ops/logit_cpu_dispatch.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logspace_cpu_dispatch.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/lshift_cpu_dispatch.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lt_cpu_dispatch.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lu_unpack_cpu_dispatch.h>
#include <ATen/ops/lu_unpack_native.h>
#include <ATen/ops/masked_fill_cpu_dispatch.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_scatter_cpu_dispatch.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_select_cpu_dispatch.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/max_cpu_dispatch.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_cpu_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/max_pool3d_with_indices_cpu_dispatch.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_unpool2d_cpu_dispatch.h>
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_cpu_dispatch.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/maximum_cpu_dispatch.h>
#include <ATen/ops/maximum_native.h>
#include <ATen/ops/mean_cpu_dispatch.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/median_cpu_dispatch.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/min_cpu_dispatch.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/minimum_cpu_dispatch.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/mish_backward_cpu_dispatch.h>
#include <ATen/ops/mish_backward_native.h>
#include <ATen/ops/mish_cpu_dispatch.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_cpu_dispatch.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mkldnn_rnn_layer_cpu_dispatch.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mm_cpu_dispatch.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mode_cpu_dispatch.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mse_loss_backward_cpu_dispatch.h>
#include <ATen/ops/mse_loss_backward_native.h>
#include <ATen/ops/mse_loss_cpu_dispatch.h>
#include <ATen/ops/mse_loss_native.h>
#include <ATen/ops/mul_cpu_dispatch.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/multi_margin_loss_backward_cpu_dispatch.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multi_margin_loss_cpu_dispatch.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_cpu_dispatch.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_forward_cpu_dispatch.h>
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
#include <ATen/ops/multinomial_cpu_dispatch.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/mvlgamma_cpu_dispatch.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/nan_to_num_cpu_dispatch.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nanmedian_cpu_dispatch.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nansum_cpu_dispatch.h>
#include <ATen/ops/nansum_native.h>
#include <ATen/ops/narrow_copy_cpu_dispatch.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/native_batch_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_cpu_dispatch.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_channel_shuffle_cpu_dispatch.h>
#include <ATen/ops/native_channel_shuffle_native.h>
#include <ATen/ops/native_dropout_backward_cpu_dispatch.h>
#include <ATen/ops/native_dropout_backward_native.h>
#include <ATen/ops/native_dropout_cpu_dispatch.h>
#include <ATen/ops/native_dropout_native.h>
#include <ATen/ops/native_group_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_group_norm_cpu_dispatch.h>
#include <ATen/ops/native_group_norm_native.h>
#include <ATen/ops/native_layer_norm_backward_cpu_dispatch.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_cpu_dispatch.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/ne_cpu_dispatch.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/neg_cpu_dispatch.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/nextafter_cpu_dispatch.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nll_loss2d_backward_cpu_dispatch.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_forward_cpu_dispatch.h>
#include <ATen/ops/nll_loss2d_forward_native.h>
#include <ATen/ops/nll_loss_backward_cpu_dispatch.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_cpu_dispatch.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nonzero_cpu_dispatch.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_static_cpu_dispatch.h>
#include <ATen/ops/nonzero_static_native.h>
#include <ATen/ops/norm_cpu_dispatch.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/normal_cpu_dispatch.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/ormqr_cpu_dispatch.h>
#include <ATen/ops/ormqr_native.h>
#include <ATen/ops/pixel_shuffle_cpu_dispatch.h>
#include <ATen/ops/pixel_shuffle_native.h>
#include <ATen/ops/pixel_unshuffle_cpu_dispatch.h>
#include <ATen/ops/pixel_unshuffle_native.h>
#include <ATen/ops/poisson_cpu_dispatch.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/polar_cpu_dispatch.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polygamma_cpu_dispatch.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/pow_cpu_dispatch.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/prod_cpu_dispatch.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/put_cpu_dispatch.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/quantize_per_channel_cpu_dispatch.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/quantize_per_tensor_cpu_dispatch.h>
#include <ATen/ops/quantize_per_tensor_dynamic_cpu_dispatch.h>
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/random_cpu_dispatch.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/randperm_cpu_dispatch.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/range_cpu_dispatch.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/reciprocal_cpu_dispatch.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reflection_pad1d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad2d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad2d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad3d_backward_cpu_dispatch.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_cpu_dispatch.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/relu_cpu_dispatch.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/remainder_cpu_dispatch.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/renorm_cpu_dispatch.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/repeat_interleave_cpu_dispatch.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/replication_pad1d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_cpu_dispatch.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad2d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_cpu_dispatch.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad3d_backward_cpu_dispatch.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_cpu_dispatch.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/resize_cpu_dispatch.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/roll_cpu_dispatch.h>
#include <ATen/ops/roll_native.h>
#include <ATen/ops/round_cpu_dispatch.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/rrelu_with_noise_cpu_dispatch.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rshift_cpu_dispatch.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rsqrt_cpu_dispatch.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsub_cpu_dispatch.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/scatter_add_cpu_dispatch.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_cpu_dispatch.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_reduce_cpu_dispatch.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/searchsorted_cpu_dispatch.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/segment_reduce_cpu_dispatch.h>
#include <ATen/ops/segment_reduce_native.h>
#include <ATen/ops/set_cpu_dispatch.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/sgn_cpu_dispatch.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sigmoid_backward_cpu_dispatch.h>
#include <ATen/ops/sigmoid_backward_native.h>
#include <ATen/ops/sigmoid_cpu_dispatch.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sign_cpu_dispatch.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/signbit_cpu_dispatch.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/silu_backward_cpu_dispatch.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/silu_cpu_dispatch.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/sin_cpu_dispatch.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sinc_cpu_dispatch.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinh_cpu_dispatch.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/slow_conv3d_forward_cpu_dispatch.h>
#include <ATen/ops/slow_conv3d_forward_native.h>
#include <ATen/ops/slow_conv_dilated2d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_dilated2d_native.h>
#include <ATen/ops/slow_conv_dilated3d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_dilated3d_native.h>
#include <ATen/ops/slow_conv_transpose2d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#include <ATen/ops/slow_conv_transpose3d_cpu_dispatch.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#include <ATen/ops/smooth_l1_loss_backward_cpu_dispatch.h>
#include <ATen/ops/smooth_l1_loss_backward_native.h>
#include <ATen/ops/smooth_l1_loss_cpu_dispatch.h>
#include <ATen/ops/smooth_l1_loss_native.h>
#include <ATen/ops/softplus_backward_cpu_dispatch.h>
#include <ATen/ops/softplus_backward_native.h>
#include <ATen/ops/softplus_cpu_dispatch.h>
#include <ATen/ops/softplus_native.h>
#include <ATen/ops/softshrink_backward_cpu_dispatch.h>
#include <ATen/ops/softshrink_backward_native.h>
#include <ATen/ops/softshrink_cpu_dispatch.h>
#include <ATen/ops/softshrink_native.h>
#include <ATen/ops/sort_cpu_dispatch.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/special_airy_ai_cpu_dispatch.h>
#include <ATen/ops/special_airy_ai_native.h>
#include <ATen/ops/special_bessel_j0_cpu_dispatch.h>
#include <ATen/ops/special_bessel_j0_native.h>
#include <ATen/ops/special_bessel_j1_cpu_dispatch.h>
#include <ATen/ops/special_bessel_j1_native.h>
#include <ATen/ops/special_bessel_y0_cpu_dispatch.h>
#include <ATen/ops/special_bessel_y0_native.h>
#include <ATen/ops/special_bessel_y1_cpu_dispatch.h>
#include <ATen/ops/special_bessel_y1_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_entr_cpu_dispatch.h>
#include <ATen/ops/special_entr_native.h>
#include <ATen/ops/special_erfcx_cpu_dispatch.h>
#include <ATen/ops/special_erfcx_native.h>
#include <ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_he_cpu_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_i0e_cpu_dispatch.h>
#include <ATen/ops/special_i0e_native.h>
#include <ATen/ops/special_i1_cpu_dispatch.h>
#include <ATen/ops/special_i1_native.h>
#include <ATen/ops/special_i1e_cpu_dispatch.h>
#include <ATen/ops/special_i1e_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_cpu_dispatch.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_log_ndtr_cpu_dispatch.h>
#include <ATen/ops/special_log_ndtr_native.h>
#include <ATen/ops/special_modified_bessel_i0_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_i0_native.h>
#include <ATen/ops/special_modified_bessel_i1_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_i1_native.h>
#include <ATen/ops/special_modified_bessel_k0_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_k0_native.h>
#include <ATen/ops/special_modified_bessel_k1_cpu_dispatch.h>
#include <ATen/ops/special_modified_bessel_k1_native.h>
#include <ATen/ops/special_ndtri_cpu_dispatch.h>
#include <ATen/ops/special_ndtri_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_cpu_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_spherical_bessel_j0_cpu_dispatch.h>
#include <ATen/ops/special_spherical_bessel_j0_native.h>
#include <ATen/ops/special_xlog1py_cpu_dispatch.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_zeta_cpu_dispatch.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/sqrt_cpu_dispatch.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sspaddmm_cpu_dispatch.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/std_cpu_dispatch.h>
#include <ATen/ops/std_mean_cpu_dispatch.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/sub_cpu_dispatch.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sum_cpu_dispatch.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/take_cpu_dispatch.h>
#include <ATen/ops/take_native.h>
#include <ATen/ops/tan_cpu_dispatch.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tanh_backward_cpu_dispatch.h>
#include <ATen/ops/tanh_backward_native.h>
#include <ATen/ops/tanh_cpu_dispatch.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_backward_cpu_dispatch.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/threshold_cpu_dispatch.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/to_mkldnn_cpu_dispatch.h>
#include <ATen/ops/to_mkldnn_native.h>
#include <ATen/ops/topk_cpu_dispatch.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/trace_cpu_dispatch.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/triangular_solve_cpu_dispatch.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/tril_cpu_dispatch.h>
#include <ATen/ops/tril_indices_cpu_dispatch.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_cpu_dispatch.h>
#include <ATen/ops/triu_indices_cpu_dispatch.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/trunc_cpu_dispatch.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/unfold_backward_cpu_dispatch.h>
#include <ATen/ops/unfold_backward_native.h>
#include <ATen/ops/unfold_cpu_dispatch.h>
#include <ATen/ops/unfold_native.h>
#include <ATen/ops/uniform_cpu_dispatch.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/unique_consecutive_cpu_dispatch.h>
#include <ATen/ops/unique_consecutive_native.h>
#include <ATen/ops/unique_dim_consecutive_cpu_dispatch.h>
#include <ATen/ops/unique_dim_consecutive_native.h>
#include <ATen/ops/unique_dim_cpu_dispatch.h>
#include <ATen/ops/unique_dim_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#include <ATen/ops/upsample_bicubic2d_cpu_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bilinear2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
#include <ATen/ops/upsample_bilinear2d_cpu_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_linear1d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_linear1d_backward_native.h>
#include <ATen/ops/upsample_linear1d_cpu_dispatch.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest1d_backward_native.h>
#include <ATen/ops/upsample_nearest1d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest2d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest2d_backward_native.h>
#include <ATen/ops/upsample_nearest2d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/upsample_nearest3d_cpu_dispatch.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
#include <ATen/ops/upsample_trilinear3d_cpu_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/var_cpu_dispatch.h>
#include <ATen/ops/var_mean_cpu_dispatch.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/vdot_cpu_dispatch.h>
#include <ATen/ops/vdot_native.h>
#include <ATen/ops/view_as_complex_cpu_dispatch.h>
#include <ATen/ops/view_as_complex_native.h>
#include <ATen/ops/view_as_real_cpu_dispatch.h>
#include <ATen/ops/view_as_real_native.h>
#include <ATen/ops/view_cpu_dispatch.h>
#include <ATen/ops/view_native.h>
#include <ATen/ops/where_cpu_dispatch.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/xlogy_cpu_dispatch.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/zero_cpu_dispatch.h>
#include <ATen/ops/zero_native.h>

namespace at {
namespace {
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-function")

Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (strides.empty()) {
      return at::detail::empty_cpu(sizes, options);
  } else {
      return at::detail::empty_strided_cpu(sizes, strides, options);
  }
}
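
// The structured-kernel wrappers later in this file use create_out to
// allocate results for functional (non-out) variants: contiguous when the
// meta function supplies no strides, strided otherwise. A minimal usage
// sketch (hypothetical sizes/strides, illustrative only):
//
//   // 2x3 float tensor with explicit row-major strides
//   Tensor t = create_out({2, 3}, {3, 1}, TensorOptions().dtype(kFloat));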

void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
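
// Note that at::native::resize_output returns true only when it actually
// changed `out`'s sizes; the restriding above is therefore skipped for
// already-correctly-sized outputs, preserving whatever strides the
// caller's tensor came in with.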

void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  //   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  //   2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}

std::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  if (out.strides() != strides) {
    return at::detail::empty_strided_cpu(sizes, strides, options);
  }
  return std::nullopt;
}
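
// maybe_create_proxy backs the out= wrappers below: when the caller's
// `out` tensor does not already carry the exact strides the kernel needs,
// the kernel computes into a freshly allocated proxy instead, and the
// wrapper copies the proxy back into `out` afterwards. A rough sketch of
// that round trip (illustrative only):
//
//   if (auto proxy = maybe_create_proxy(out, sizes, strides, options)) {
//     // ... kernel writes into *proxy ...
//     out.copy_(*proxy);
//   }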
C10_DIAGNOSTIC_POP()
} // namespace
} // namespace at

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__native_dropout(const at::Tensor & input, double p, ::std::optional<bool> train) {
  // No device check
  // DeviceGuard omitted
  return at::native::native_dropout_cpu(input, p, train);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("native_dropout",
TORCH_FN(wrapper_CPU__native_dropout));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> native_dropout(const at::Tensor & input, double p, ::std::optional<bool> train) {
  return wrapper_CPU__native_dropout(input, p, train);
}
} // namespace cpu
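
// Both call paths end up in the same kernel (a sketch, not part of this
// file): at::native_dropout(t, 0.5, true) goes through the dispatcher and
// reaches wrapper_CPU__native_dropout for CPU tensors via the registration
// above, while at::cpu::native_dropout(t, 0.5, true) calls it directly,
// bypassing dispatch.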
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
  // No device check
  // DeviceGuard omitted
  return at::native::native_dropout_backward(grad_output, mask, scale);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("native_dropout_backward",
TORCH_FN(wrapper_CPU__native_dropout_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
  return wrapper_CPU__native_dropout_backward(grad_output, mask, scale);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__view_as_complex(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::view_as_complex(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("view_as_complex",
TORCH_FN(wrapper_CPU__view_as_complex));
}
} // anonymous namespace
namespace cpu {
at::Tensor view_as_complex(const at::Tensor & self) {
  return wrapper_CPU__view_as_complex(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_asinh_out_functional final : public at::native::structured_asinh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_asinh(const at::Tensor & self) {
structured_asinh_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
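// wrapper_CPU_asinh above follows the structured-kernel protocol: op.meta()
// validates inputs and computes the output shape/dtype, calling back into
// set_output_strided() (which allocates via create_out for this functional
// variant); op.impl() then fills outputs_[0]. The out= and in-place variants
// below reuse the same meta/impl pair and differ only in how set_output_*
// obtains the output tensor.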
struct structured_asinh_out_out final : public at::native::structured_asinh_out {
    structured_asinh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_asinh_out_out(const at::Tensor & self, at::Tensor & out) {
structured_asinh_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_asinh_out_inplace final : public at::native::structured_asinh_out {
    structured_asinh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_asinh_(at::Tensor & self) {
structured_asinh_out_inplace op(self);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("asinh", TORCH_FN(wrapper_CPU_asinh));
m.impl("asinh.out", TORCH_FN(wrapper_CPU_asinh_out_out));
m.impl("asinh_", TORCH_FN(wrapper_CPU_asinh_));
}
} // anonymous namespace
namespace cpu {
at::Tensor asinh(const at::Tensor & self) {
return wrapper_CPU_asinh(self);
}
at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_asinh_out_out(self, out);
}
at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_asinh_out_out(self, out);
}
at::Tensor & asinh_(at::Tensor & self) {
return wrapper_CPU_asinh_(self);
}
} // namespace cpu
} // namespace at
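// Note the two out= spellings emitted into at::cpu above: *_out takes the
// output first, *_outf takes it in schema order (output last); both forward
// to the same wrapper. A hedged sketch:
//
//   at::Tensor x = at::randn({2, 3});
//   at::Tensor out = at::empty({2, 3});
//   at::cpu::asinh_out(out, x);   // output-first spelling
//   at::cpu::asinh_outf(x, out);  // schema-order spelling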
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_baddbmm_out_cpu_functional final : public at::native::structured_baddbmm_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
structured_baddbmm_out_cpu_functional op;
op.meta(self, batch1, batch2, beta, alpha);
op.impl(self, batch1, batch2, beta, alpha, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_baddbmm_out_cpu_out final : public at::native::structured_baddbmm_out_cpu {
    structured_baddbmm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_baddbmm_out_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
structured_baddbmm_out_cpu_out op(out);
op.meta(self, batch1, batch2, beta, alpha);
op.impl(self, batch1, batch2, beta, alpha, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_baddbmm_out_cpu_inplace final : public at::native::structured_baddbmm_out_cpu {
    structured_baddbmm_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
structured_baddbmm_out_cpu_inplace op(self);
op.meta(self, batch1, batch2, beta, alpha);
op.impl(self, batch1, batch2, beta, alpha, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("baddbmm", TORCH_FN(wrapper_CPU_baddbmm));
m.impl("baddbmm.out", TORCH_FN(wrapper_CPU_baddbmm_out_out));
m.impl("baddbmm_", TORCH_FN(wrapper_CPU_baddbmm_));
}
} // anonymous namespace
namespace cpu {
at::Tensor baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_baddbmm(self, batch1, batch2, beta, alpha);
}
at::Tensor & baddbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_baddbmm_out_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_baddbmm_out_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_baddbmm_(self, batch1, batch2, beta, alpha);
}
} // namespace cpu
} // namespace at
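// For reference, baddbmm computes out = beta * self + alpha * bmm(batch1, batch2)
// batch-wise. A hedged sketch of the direct CPU entry point:
//
//   at::Tensor self = at::zeros({10, 3, 5});
//   at::Tensor b1 = at::randn({10, 3, 4});
//   at::Tensor b2 = at::randn({10, 4, 5});
//   at::Tensor r = at::cpu::baddbmm(self, b1, b2, /*beta=*/1.0, /*alpha=*/1.0);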
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_bernoulli_out(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::bernoulli_out(self, generator, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bernoulli.out",
TORCH_FN(wrapper_CPU_out_bernoulli_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::Generator> generator) {
return wrapper_CPU_out_bernoulli_out(self, generator, out);
}
at::Tensor & bernoulli_outf(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_out_bernoulli_out(self, generator, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_Tensor_bernoulli_(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::bernoulli_(self, p, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bernoulli_.Tensor",
TORCH_FN(wrapper_CPU_Tensor_bernoulli_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_bernoulli_(self, p, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_float_bernoulli_(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::bernoulli_(self, p, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bernoulli_.float",
TORCH_FN(wrapper_CPU_float_bernoulli_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & bernoulli_(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
return wrapper_CPU_float_bernoulli_(self, p, generator);
}
} // namespace cpu
} // namespace at
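// The three bernoulli registrations above are unstructured: each wrapper
// forwards to at::native::bernoulli_out / bernoulli_, threading the optional
// Generator through unchanged. A hedged sketch of the in-place float
// overload:
//
//   at::Tensor t = at::empty({5});
//   at::cpu::bernoulli_(t, /*p=*/0.3, ::std::nullopt);  // each element ~ Bernoulli(0.3)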
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_bmm_out_cpu_functional final : public at::native::structured_bmm_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_bmm(const at::Tensor & self, const at::Tensor & mat2) {
structured_bmm_out_cpu_functional op;
op.meta(self, mat2);
op.impl(self, mat2, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_bmm_out_cpu_out final : public at::native::structured_bmm_out_cpu {
    structured_bmm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_bmm_out_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
structured_bmm_out_cpu_out op(out);
op.meta(self, mat2);
op.impl(self, mat2, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("bmm", TORCH_FN(wrapper_CPU_bmm));
m.impl("bmm.out", TORCH_FN(wrapper_CPU_bmm_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU_bmm(self, mat2);
}
at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU_bmm_out_out(self, mat2, out);
}
at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_CPU_bmm_out_out(self, mat2, out);
}
} // namespace cpu
} // namespace at
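// Unlike baddbmm, bmm has only functional and out= wrappers here: the aten
// schema defines no in-place overload (presumably because the result shape
// (b, n, p) need not match self's (b, n, m)), so no *_inplace struct is
// generated for it.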
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_clamp_max_out_functional final : public at::native::structured_clamp_max_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_max(const at::Tensor & self, const at::Scalar & max) {
structured_clamp_max_out_functional op;
op.meta(self, max);
op.impl(self, max, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_clamp_max_out_out final : public at::native::structured_clamp_max_out {
    structured_clamp_max_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max_out_out(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
structured_clamp_max_out_out op(out);
op.meta(self, max);
op.impl(self, max, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_max_out_inplace final : public at::native::structured_clamp_max_out {
    structured_clamp_max_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max_(at::Tensor & self, const at::Scalar & max) {
structured_clamp_max_out_inplace op(self);
op.meta(self, max);
op.impl(self, max, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("clamp_max", TORCH_FN(wrapper_CPU_clamp_max));
m.impl("clamp_max.out", TORCH_FN(wrapper_CPU_clamp_max_out_out));
m.impl("clamp_max_", TORCH_FN(wrapper_CPU_clamp_max_));
}
} // anonymous namespace
namespace cpu {
at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max) {
return wrapper_CPU_clamp_max(self, max);
}
at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & max) {
return wrapper_CPU_clamp_max_out_out(self, max, out);
}
at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
return wrapper_CPU_clamp_max_out_out(self, max, out);
}
at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max) {
return wrapper_CPU_clamp_max_(self, max);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_clamp_max_Tensor_out_functional final : public at::native::structured_clamp_max_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_max_Tensor(const at::Tensor & self, const at::Tensor & max) {
structured_clamp_max_Tensor_out_functional op;
op.meta(self, max);
op.impl(self, max, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_clamp_max_Tensor_out_out final : public at::native::structured_clamp_max_Tensor_out {
    structured_clamp_max_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max_out_Tensor_out(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
structured_clamp_max_Tensor_out_out op(out);
op.meta(self, max);
op.impl(self, max, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_max_Tensor_out_inplace final : public at::native::structured_clamp_max_Tensor_out {
    structured_clamp_max_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max__Tensor(at::Tensor & self, const at::Tensor & max) {
structured_clamp_max_Tensor_out_inplace op(self);
op.meta(self, max);
op.impl(self, max, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("clamp_max.Tensor", TORCH_FN(wrapper_CPU_clamp_max_Tensor));
m.impl("clamp_max.Tensor_out", TORCH_FN(wrapper_CPU_clamp_max_out_Tensor_out));
m.impl("clamp_max_.Tensor", TORCH_FN(wrapper_CPU_clamp_max__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max) {
return wrapper_CPU_clamp_max_Tensor(self, max);
}
at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & max) {
return wrapper_CPU_clamp_max_out_Tensor_out(self, max, out);
}
at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
return wrapper_CPU_clamp_max_out_Tensor_out(self, max, out);
}
at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max) {
return wrapper_CPU_clamp_max__Tensor(self, max);
}
} // namespace cpu
} // namespace at
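// clamp_max is registered under two schema names: "clamp_max" (Scalar bound,
// previous block) and "clamp_max.Tensor" (tensor bound, this block). The
// at::cpu:: entry points distinguish them by ordinary C++ overloading. A
// hedged sketch:
//
//   at::Tensor x = at::randn({4});
//   at::Tensor a = at::cpu::clamp_max(x, 1.0);               // Scalar overload
//   at::Tensor b = at::cpu::clamp_max(x, at::full({}, 1.0)); // Tensor overload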
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_clamp_min_out_functional final : public at::native::structured_clamp_min_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_min(const at::Tensor & self, const at::Scalar & min) {
structured_clamp_min_out_functional op;
op.meta(self, min);
op.impl(self, min, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_clamp_min_out_out final : public at::native::structured_clamp_min_out {
    structured_clamp_min_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min_out_out(const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
structured_clamp_min_out_out op(out);
op.meta(self, min);
op.impl(self, min, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_min_out_inplace final : public at::native::structured_clamp_min_out {
    structured_clamp_min_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min_(at::Tensor & self, const at::Scalar & min) {
structured_clamp_min_out_inplace op(self);
op.meta(self, min);
op.impl(self, min, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("clamp_min", TORCH_FN(wrapper_CPU_clamp_min));
m.impl("clamp_min.out", TORCH_FN(wrapper_CPU_clamp_min_out_out));
m.impl("clamp_min_", TORCH_FN(wrapper_CPU_clamp_min_));
}
} // anonymous namespace
namespace cpu {
at::Tensor clamp_min(const at::Tensor & self, const at::Scalar & min) {
return wrapper_CPU_clamp_min(self, min);
}
at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min) {
return wrapper_CPU_clamp_min_out_out(self, min, out);
}
at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
return wrapper_CPU_clamp_min_out_out(self, min, out);
}
at::Tensor & clamp_min_(at::Tensor & self, const at::Scalar & min) {
return wrapper_CPU_clamp_min_(self, min);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_clamp_min_Tensor_out_functional final : public at::native::structured_clamp_min_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_min_Tensor(const at::Tensor & self, const at::Tensor & min) {
structured_clamp_min_Tensor_out_functional op;
op.meta(self, min);
op.impl(self, min, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_clamp_min_Tensor_out_out final : public at::native::structured_clamp_min_Tensor_out {
    structured_clamp_min_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min_out_Tensor_out(const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
structured_clamp_min_Tensor_out_out op(out);
op.meta(self, min);
op.impl(self, min, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_min_Tensor_out_inplace final : public at::native::structured_clamp_min_Tensor_out {
    structured_clamp_min_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min__Tensor(at::Tensor & self, const at::Tensor & min) {
structured_clamp_min_Tensor_out_inplace op(self);
op.meta(self, min);
op.impl(self, min, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("clamp_min.Tensor", TORCH_FN(wrapper_CPU_clamp_min_Tensor));
m.impl("clamp_min.Tensor_out", TORCH_FN(wrapper_CPU_clamp_min_out_Tensor_out));
m.impl("clamp_min_.Tensor", TORCH_FN(wrapper_CPU_clamp_min__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor clamp_min(const at::Tensor & self, const at::Tensor & min) {
return wrapper_CPU_clamp_min_Tensor(self, min);
}
at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & min) {
return wrapper_CPU_clamp_min_out_Tensor_out(self, min, out);
}
at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
return wrapper_CPU_clamp_min_out_Tensor_out(self, min, out);
}
at::Tensor & clamp_min_(at::Tensor & self, const at::Tensor & min) {
return wrapper_CPU_clamp_min__Tensor(self, min);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_complex_out(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::complex_out(real, imag, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("complex.out",
TORCH_FN(wrapper_CPU_out_complex_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & complex_out(at::Tensor & out, const at::Tensor & real, const at::Tensor & imag) {
return wrapper_CPU_out_complex_out(real, imag, out);
}
at::Tensor & complex_outf(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
return wrapper_CPU_out_complex_out(real, imag, out);
}
} // namespace cpu
} // namespace at
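// complex.out assembles a complex tensor from separate real and imaginary
// parts. Both inputs must share a floating dtype, and the native kernel
// expects `out` to have the corresponding complex dtype. A hedged sketch:
//
//   at::Tensor re = at::randn({3});
//   at::Tensor im = at::randn({3});
//   at::Tensor out = at::empty({3}, at::kComplexFloat);
//   at::cpu::complex_outf(re, im, out);  // out[k] == re[k] + i*im[k]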
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_cos_out_functional final : public at::native::structured_cos_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_cos(const at::Tensor & self) {
structured_cos_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_cos_out_out final : public at::native::structured_cos_out {
    structured_cos_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cos_out_out(const at::Tensor & self, at::Tensor & out) {
structured_cos_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_cos_out_inplace final : public at::native::structured_cos_out {
    structured_cos_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cos_(at::Tensor & self) {
structured_cos_out_inplace op(self);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("cos", TORCH_FN(wrapper_CPU_cos));
m.impl("cos.out", TORCH_FN(wrapper_CPU_cos_out_out));
m.impl("cos_", TORCH_FN(wrapper_CPU_cos_));
}
} // anonymous namespace
namespace cpu {
at::Tensor cos(const at::Tensor & self) {
return wrapper_CPU_cos(self);
}
at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_cos_out_out(self, out);
}
at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_cos_out_out(self, out);
}
at::Tensor & cos_(at::Tensor & self) {
return wrapper_CPU_cos_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_dim_IntList_count_nonzero(const at::Tensor & self, at::IntArrayRef dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::count_nonzero_cpu(self, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("count_nonzero.dim_IntList",
           TORCH_FN(wrapper_CPU_dim_IntList_count_nonzero));
}
} // anonymous namespace
namespace cpu {
at::Tensor count_nonzero(const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_CPU_dim_IntList_count_nonzero(self, dim);
}
} // namespace cpu
} // namespace at
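// Illustrative note (not generated): the TORCH_LIBRARY_IMPL block above is
// what ties the "count_nonzero.dim_IntList" schema to this kernel, so a
// dispatcher call such as
//
//   at::Tensor x = at::tensor({0, 1, 2});
//   at::Tensor n = at::count_nonzero(x, {0});
//
// routes to at::native::count_nonzero_cpu whenever the input carries the CPU
// dispatch key.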
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
void wrapper_CPU___cummax_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax_helper_cpu(self, values, indices, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_cummax_helper",
           TORCH_FN(wrapper_CPU___cummax_helper));
}
} // anonymous namespace
namespace cpu {
void _cummax_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
return wrapper_CPU___cummax_helper(self, values, indices, dim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_cumsum_out_functional final : public at::native::structured_cumsum_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_cumsum(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
structured_cumsum_out_functional op;
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_cumsum_out_out final : public at::native::structured_cumsum_out {
    structured_cumsum_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cumsum_out_out(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
structured_cumsum_out_out op(out);
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_cumsum_out_inplace final : public at::native::structured_cumsum_out {
    structured_cumsum_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cumsum_(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
structured_cumsum_out_inplace op(self);
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("cumsum", TORCH_FN(wrapper_CPU_cumsum));
    m.impl("cumsum.out", TORCH_FN(wrapper_CPU_cumsum_out_out));
    m.impl("cumsum_", TORCH_FN(wrapper_CPU_cumsum_));
}
} // anonymous namespace
namespace cpu {
at::Tensor cumsum(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_cumsum(self, dim, dtype);
}
at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_cumsum_out_out(self, dim, dtype, out);
}
at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CPU_cumsum_out_out(self, dim, dtype, out);
}
at::Tensor & cumsum_(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_cumsum_(self, dim, dtype);
}
} // namespace cpu
} // namespace at
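// Illustrative (not generated): the trailing ::std::optional<at::ScalarType>
// selects the accumulation dtype. A minimal sketch:
//
//   at::Tensor x = at::ones({4}, at::kInt);
//   at::Tensor a = at::cpu::cumsum(x, /*dim=*/0, ::std::nullopt);  // default dtype (integral inputs promote to int64)
//   at::Tensor b = at::cpu::cumsum(x, /*dim=*/0, at::kLong);       // accumulate explicitly as int64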
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss_cpu(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_ctc_loss",
           TORCH_FN(wrapper_CPU___ctc_loss));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
return wrapper_CPU___ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_Tensor__ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss_tensor(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_ctc_loss.Tensor",
           TORCH_FN(wrapper_CPU_Tensor__ctc_loss));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
return wrapper_CPU_Tensor__ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}
} // namespace cpu
} // namespace at
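// Illustrative note (not generated): _ctc_loss is registered twice above --
// the base "_ctc_loss" schema takes the per-sample lengths as at::IntArrayRef
// and routes to ctc_loss_cpu, while the "_ctc_loss.Tensor" schema accepts
// them as tensors and routes to ctc_loss_tensor instead.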
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss_backward_cpu(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_ctc_loss_backward",
           TORCH_FN(wrapper_CPU___ctc_loss_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
return wrapper_CPU___ctc_loss_backward(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor__ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss_backward_tensor(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_ctc_loss_backward.Tensor",
           TORCH_FN(wrapper_CPU_Tensor__ctc_loss_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
return wrapper_CPU_Tensor__ctc_loss_backward(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU__embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_dense_backward_cpu(grad_output, indices, num_weights.guard_int(__FILE__, __LINE__), padding_idx.guard_int(__FILE__, __LINE__), scale_grad_by_freq);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("embedding_dense_backward",
           TORCH_FN(wrapper_CPU__embedding_dense_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
return wrapper_CPU__embedding_dense_backward(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
}
at::Tensor embedding_dense_backward_symint(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
return wrapper_CPU__embedding_dense_backward(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
}
} // namespace cpu
} // namespace at
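// Illustrative (not generated): both cpu:: overloads above funnel into the
// same SymInt wrapper -- plain int64_t arguments convert implicitly, and
// guard_int() extracts concrete integers before the CPU kernel runs. A
// sketch, given suitable grad_output and indices tensors:
//
//   at::Tensor g = at::cpu::embedding_dense_backward(
//       grad_output, indices, /*num_weights=*/10, /*padding_idx=*/-1,
//       /*scale_grad_by_freq=*/false);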
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___empty_per_channel_affine_quantized(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_per_channel_affine_quantized_other_backends_stub(C10_AS_INTARRAYREF_SLOW(size), scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_empty_per_channel_affine_quantized",
           TORCH_FN(wrapper_CPU___empty_per_channel_affine_quantized));
}
} // anonymous namespace
namespace cpu {
at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_per_channel_affine_quantized(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_per_channel_affine_quantized(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}
at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}
} // namespace cpu
} // namespace at
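// Illustrative note (not generated): the four cpu:: overloads above are the
// usual TensorOptions fan-out -- the packed at::TensorOptions form is
// scattered into optional dtype/layout/device/pin_memory before reaching the
// single wrapper, and check_tensor_options_and_extract_memory_format
// reconciles a memory_format supplied both inside the options and as the
// trailing argument. Judging by the kernel name, the plain-CPU registration
// routes to a stub intended for non-quantized backends.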
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
const at::Tensor & wrapper_CPU__resize_(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::resize_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("resize_",
           TORCH_FN(wrapper_CPU__resize_));
}
} // anonymous namespace
namespace cpu {
const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU__resize_(self, c10::fromIntArrayRefSlow(size), memory_format);
}
const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_CPU__resize_(self, size, memory_format);
}
} // namespace cpu
} // namespace at
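// Illustrative (not generated): resize_ is registered with a const
// at::Tensor& receiver even though it mutates the underlying storage. A
// minimal sketch:
//
//   at::Tensor t = at::empty({0});
//   at::cpu::resize_(t, {2, 3}, ::std::nullopt);  // t is now a 2x3 tensor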
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_exp2_out_functional final : public at::native::structured_exp2_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_exp2(const at::Tensor & self) {
structured_exp2_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_exp2_out_out final : public at::native::structured_exp2_out {
    structured_exp2_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_exp2_out_out(const at::Tensor & self, at::Tensor & out) {
structured_exp2_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_exp2_out_inplace final : public at::native::structured_exp2_out {
    structured_exp2_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_exp2_(at::Tensor & self) {
structured_exp2_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("exp2", TORCH_FN(wrapper_CPU_exp2));
    m.impl("exp2.out", TORCH_FN(wrapper_CPU_exp2_out_out));
    m.impl("exp2_", TORCH_FN(wrapper_CPU_exp2_));
}
} // anonymous namespace
namespace cpu {
at::Tensor exp2(const at::Tensor & self) {
return wrapper_CPU_exp2(self);
}
at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_exp2_out_out(self, out);
}
at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_exp2_out_out(self, out);
}
at::Tensor & exp2_(at::Tensor & self) {
return wrapper_CPU_exp2_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::grid_sampler_3d_backward_cpu(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("grid_sampler_3d_backward",
           TORCH_FN(wrapper_CPU__grid_sampler_3d_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
return wrapper_CPU__grid_sampler_3d_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor wrapper_CPU___fft_c2c(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fft_c2c_mkl(self, C10_AS_INTARRAYREF_SLOW(dim), normalization, forward);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out__fft_c2c_out(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fft_c2c_mkl_out(self, C10_AS_INTARRAYREF_SLOW(dim), normalization, forward, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fft_c2c",
           TORCH_FN(wrapper_CPU___fft_c2c));
    m.impl("_fft_c2c.out",
           TORCH_FN(wrapper_CPU_out__fft_c2c_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
return wrapper_CPU___fft_c2c(self, c10::fromIntArrayRefSlow(dim), normalization, forward);
}
at::Tensor _fft_c2c_symint(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
return wrapper_CPU___fft_c2c(self, dim, normalization, forward);
}
at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
return wrapper_CPU_out__fft_c2c_out(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
}
at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
return wrapper_CPU_out__fft_c2c_out(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
}
at::Tensor & _fft_c2c_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
return wrapper_CPU_out__fft_c2c_out(self, dim, normalization, forward, out);
}
at::Tensor & _fft_c2c_symint_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
return wrapper_CPU_out__fft_c2c_out(self, dim, normalization, forward, out);
}
} // namespace cpu
} // namespace at
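// Illustrative note (not generated): the _out / _outf pair above is the
// standard naming split in this namespace -- _out takes the output tensor
// first, _outf takes it last, and both forward to the same wrapper:
//
//   at::cpu::_fft_c2c_out(out, self, dim, normalization, forward);
//   at::cpu::_fft_c2c_outf(self, dim, normalization, forward, out);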
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_index_copy_out_functional final : public at::native::structured_index_copy_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
structured_index_copy_out_functional op;
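// index_copy declares precomputed meta values: meta() returns a struct of
// quantities worked out during shape checking (here the wrapped `dim`), which
// impl() consumes instead of recomputing them. The (void)precompute cast
// keeps the generated code warning-free for ops that read no such field.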
auto precompute = op.meta(self, dim, index, source);
(void)precompute;
op.impl(self, precompute.dim, index, source, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_index_copy_out_out final : public at::native::structured_index_copy_out {
    structured_index_copy_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_index_copy_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
structured_index_copy_out_out op(out);
auto precompute = op.meta(self, dim, index, source);
(void)precompute;
op.impl(self, precompute.dim, index, source, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_index_copy_out_inplace final : public at::native::structured_index_copy_out {
    structured_index_copy_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
structured_index_copy_out_inplace op(self);
auto precompute = op.meta(self, dim, index, source);
(void)precompute;
op.impl(self, precompute.dim, index, source, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("index_copy", TORCH_FN(wrapper_CPU_index_copy));
    m.impl("index_copy.out", TORCH_FN(wrapper_CPU_index_copy_out_out));
    m.impl("index_copy_", TORCH_FN(wrapper_CPU_index_copy_));
}
} // anonymous namespace
namespace cpu {
at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
return wrapper_CPU_index_copy(self, dim, index, source);
}
at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
return wrapper_CPU_index_copy_out_out(self, dim, index, source, out);
}
at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
return wrapper_CPU_index_copy_out_out(self, dim, index, source, out);
}
at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
return wrapper_CPU_index_copy_(self, dim, index, source);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_isin_Tensor_Tensor_out_functional final : public at::native::structured_isin_Tensor_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_isin_Tensor_Tensor(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
structured_isin_Tensor_Tensor_out_functional op;
op.meta(elements, test_elements, assume_unique, invert);
op.impl(elements, test_elements, assume_unique, invert, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_isin_Tensor_Tensor_out_out final : public at::native::structured_isin_Tensor_Tensor_out {
    structured_isin_Tensor_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_isin_out_Tensor_Tensor_out(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
structured_isin_Tensor_Tensor_out_out op(out);
op.meta(elements, test_elements, assume_unique, invert);
op.impl(elements, test_elements, assume_unique, invert, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("isin.Tensor_Tensor", TORCH_FN(wrapper_CPU_isin_Tensor_Tensor));
    m.impl("isin.Tensor_Tensor_out", TORCH_FN(wrapper_CPU_isin_out_Tensor_Tensor_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
return wrapper_CPU_isin_Tensor_Tensor(elements, test_elements, assume_unique, invert);
}
at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
return wrapper_CPU_isin_out_Tensor_Tensor_out(elements, test_elements, assume_unique, invert, out);
}
at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
return wrapper_CPU_isin_out_Tensor_Tensor_out(elements, test_elements, assume_unique, invert, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_isin_Tensor_Scalar_out_functional final : public at::native::structured_isin_Tensor_Scalar_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_isin_Tensor_Scalar(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
structured_isin_Tensor_Scalar_out_functional op;
op.meta(elements, test_element, assume_unique, invert);
op.impl(elements, test_element, assume_unique, invert, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_isin_Tensor_Scalar_out_out final : public at::native::structured_isin_Tensor_Scalar_out {
    structured_isin_Tensor_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_isin_out_Tensor_Scalar_out(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
structured_isin_Tensor_Scalar_out_out op(out);
op.meta(elements, test_element, assume_unique, invert);
op.impl(elements, test_element, assume_unique, invert, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("isin.Tensor_Scalar", TORCH_FN(wrapper_CPU_isin_Tensor_Scalar));
    m.impl("isin.Tensor_Scalar_out", TORCH_FN(wrapper_CPU_isin_out_Tensor_Scalar_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
return wrapper_CPU_isin_Tensor_Scalar(elements, test_element, assume_unique, invert);
}
at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
return wrapper_CPU_isin_out_Tensor_Scalar_out(elements, test_element, assume_unique, invert, out);
}
at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
return wrapper_CPU_isin_out_Tensor_Scalar_out(elements, test_element, assume_unique, invert, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_isin_Scalar_Tensor_out_functional final : public at::native::structured_isin_Scalar_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_isin_Scalar_Tensor(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
structured_isin_Scalar_Tensor_out_functional op;
op.meta(element, test_elements, assume_unique, invert);
op.impl(element, test_elements, assume_unique, invert, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_isin_Scalar_Tensor_out_out final : public at::native::structured_isin_Scalar_Tensor_out {
    structured_isin_Scalar_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_isin_out_Scalar_Tensor_out(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
structured_isin_Scalar_Tensor_out_out op(out);
op.meta(element, test_elements, assume_unique, invert);
op.impl(element, test_elements, assume_unique, invert, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("isin.Scalar_Tensor", TORCH_FN(wrapper_CPU_isin_Scalar_Tensor));
    m.impl("isin.Scalar_Tensor_out", TORCH_FN(wrapper_CPU_isin_out_Scalar_Tensor_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
return wrapper_CPU_isin_Scalar_Tensor(element, test_elements, assume_unique, invert);
}
at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
return wrapper_CPU_isin_out_Scalar_Tensor_out(element, test_elements, assume_unique, invert, out);
}
at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
return wrapper_CPU_isin_out_Scalar_Tensor_out(element, test_elements, assume_unique, invert, out);
}
} // namespace cpu
} // namespace at
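// Illustrative (not generated): the three isin registrations above cover the
// Tensor/Tensor, Tensor/Scalar, and Scalar/Tensor overloads. A minimal
// sketch:
//
//   at::Tensor els  = at::tensor({1, 2, 3});
//   at::Tensor test = at::tensor({2, 4});
//   at::cpu::isin(els, test, /*assume_unique=*/false, /*invert=*/false);
//   at::cpu::isin(els, at::Scalar(2), /*assume_unique=*/false, /*invert=*/false);
//   at::cpu::isin(at::Scalar(2), test, /*assume_unique=*/false, /*invert=*/false);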
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_nan_to_num_out(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nan_to_num_out(self, nan, posinf, neginf, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("nan_to_num.out",
           TORCH_FN(wrapper_CPU_out_nan_to_num_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
return wrapper_CPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
}
at::Tensor & nan_to_num_outf(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out) {
return wrapper_CPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
}
} // namespace cpu
} // namespace at
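// Illustrative (not generated): the three optionals choose the replacement
// values; passing ::std::nullopt keeps the documented defaults (0.0 for NaN
// and the dtype's largest/smallest finite value for +/-inf). A sketch, given
// a floating-point tensor x:
//
//   at::Tensor out = at::empty_like(x);
//   at::cpu::nan_to_num_out(out, x, /*nan=*/0.0, ::std::nullopt, ::std::nullopt);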
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_linspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linspace_out(start, end, steps, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linspace.out",
           TORCH_FN(wrapper_CPU_out_linspace_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) {
return wrapper_CPU_out_linspace_out(start, end, steps, out);
}
at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
return wrapper_CPU_out_linspace_out(start, end, steps, out);
}
} // namespace cpu
} // namespace at
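// Illustrative (not generated): linspace.out fills the provided tensor with
// `steps` evenly spaced values, resizing it if needed. A minimal sketch:
//
//   at::Tensor out = at::empty({0});
//   at::cpu::linspace_outf(/*start=*/0.0, /*end=*/1.0, /*steps=*/5, out);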
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been defined
// in the at namespace.
namespace {
struct structured_log_out_functional final : public at::native::structured_log_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_log(const at::Tensor & self) {
structured_log_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_log_out_out final : public at::native::structured_log_out {
    structured_log_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log_out_out(const at::Tensor & self, at::Tensor & out) {
structured_log_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_log_out_inplace final : public at::native::structured_log_out {
    structured_log_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_log_(at::Tensor & self) {
structured_log_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("log", TORCH_FN(wrapper_CPU_log));
  m.impl("log.out", TORCH_FN(wrapper_CPU_log_out_out));
  m.impl("log_", TORCH_FN(wrapper_CPU_log_));
}
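// The block above is the standard dispatcher registration idiom. A minimal
// hand-written equivalent for an out-of-tree backend would look like this
// (illustrative; `my_log` is a hypothetical kernel, not defined here):
//
//   TORCH_LIBRARY_IMPL(aten, PrivateUse1, m) {
//     m.impl("log", TORCH_FN(my_log));
//   }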
} // anonymous namespace
namespace cpu {
at::Tensor log(const at::Tensor & self) {
return wrapper_CPU_log(self);
}
at::Tensor & log_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_log_out_out(self, out);
}
at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_log_out_out(self, out);
}
at::Tensor & log_(at::Tensor & self) {
return wrapper_CPU_log_(self);
}
} // namespace cpu
} // namespace at
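// Note on the at::cpu:: wrappers above: they call the CPU kernels directly,
// skipping dispatch (and therefore autograd and device checks). That makes
// them handy inside other CPU-only code or in microbenchmarks, e.g.
// (illustrative):
//
//   at::Tensor x = at::rand({1024});
//   at::Tensor y = at::cpu::log(x);  // same kernel as at::log, no dispatch overhead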
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_log_softmax_cpu_out_functional final : public at::native::structured_log_softmax_cpu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
structured_log_softmax_cpu_out_functional op;
op.meta(self, dim, half_to_float);
op.impl(self, dim, half_to_float, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_log_softmax_cpu_out_out final : public at::native::structured_log_softmax_cpu_out {
    structured_log_softmax_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__log_softmax_out_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
structured_log_softmax_cpu_out_out op(out);
op.meta(self, dim, half_to_float);
op.impl(self, dim, half_to_float, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("_log_softmax", TORCH_FN(wrapper_CPU__log_softmax));
  m.impl("_log_softmax.out", TORCH_FN(wrapper_CPU__log_softmax_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_CPU__log_softmax(self, dim, half_to_float);
}
at::Tensor & _log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_CPU__log_softmax_out_out(self, dim, half_to_float, out);
}
at::Tensor & _log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
return wrapper_CPU__log_softmax_out_out(self, dim, half_to_float, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_log_softmax_backward_cpu_out_functional final : public at::native::structured_log_softmax_backward_cpu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
structured_log_softmax_backward_cpu_out_functional op;
op.meta(grad_output, output, dim, input_dtype);
op.impl(grad_output, output, dim, input_dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_log_softmax_backward_cpu_out_out final : public at::native::structured_log_softmax_backward_cpu_out {
    structured_log_softmax_backward_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__log_softmax_backward_data_out_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
structured_log_softmax_backward_cpu_out_out op(out);
op.meta(grad_output, output, dim, input_dtype);
op.impl(grad_output, output, dim, input_dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("_log_softmax_backward_data", TORCH_FN(wrapper_CPU__log_softmax_backward_data));
  m.impl("_log_softmax_backward_data.out", TORCH_FN(wrapper_CPU__log_softmax_backward_data_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
return wrapper_CPU__log_softmax_backward_data(grad_output, output, dim, input_dtype);
}
at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
return wrapper_CPU__log_softmax_backward_data_out_out(grad_output, output, dim, input_dtype, out);
}
at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
return wrapper_CPU__log_softmax_backward_data_out_out(grad_output, output, dim, input_dtype, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_max_out_functional final : public at::native::structured_max_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_max_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
structured_max_out_functional op;
auto precompute = op.meta(self, dim, keepdim);
(void)precompute;
op.impl(self, precompute.dim, keepdim, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
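// Illustrative note: max.dim is a structured op with a precomputed argument,
// so meta() returns a small struct and impl() consumes precompute.dim rather
// than the raw `dim`; the precomputed value is (assumption, based on the usual
// pattern for dim-taking ops) the dimension after canonicalization, e.g. a
// negative dim already wrapped:
//
//   at::Tensor x = at::rand({3, 4});
//   auto [values, indices] = at::max(x, /*dim=*/-1);  // -1 resolved against ndim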
struct structured_max_out_out final : public at::native::structured_max_out {
    structured_max_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_max_out_dim_max(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
structured_max_out_out op(max, max_values);
auto precompute = op.meta(self, dim, keepdim);
(void)precompute;
op.impl(self, precompute.dim, keepdim, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(max, max_values);
}
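// Sketch (illustrative): the out= variant above runs both outputs through the
// same machinery; each output has its own optional proxy and its own copy_()
// back, so `max` and `max_values` can take the proxy path independently:
//
//   at::Tensor x    = at::rand({3, 4});
//   at::Tensor vals = at::empty({0});
//   at::Tensor idxs = at::empty({0}, at::kLong);
//   at::max_outf(x, /*dim=*/1, /*keepdim=*/false, vals, idxs);  // both resized to {3}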
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("max.dim", TORCH_FN(wrapper_CPU_max_dim));
  m.impl("max.dim_max", TORCH_FN(wrapper_CPU_max_out_dim_max));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_max_dim(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_max_out_dim_max(self, dim, keepdim, max, max_values);
}
::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
return wrapper_CPU_max_out_dim_max(self, dim, keepdim, max, max_values);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_amax_out_functional final : public at::native::structured_amax_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_amax(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
structured_amax_out_functional op;
op.meta(self, dim, keepdim);
op.impl(self, dim, keepdim, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_amax_out_out final : public at::native::structured_amax_out {
    structured_amax_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_amax_out_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
structured_amax_out_out op(out);
op.meta(self, dim, keepdim);
op.impl(self, dim, keepdim, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("amax", TORCH_FN(wrapper_CPU_amax));
  m.impl("amax.out", TORCH_FN(wrapper_CPU_amax_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CPU_amax(self, dim, keepdim);
}
at::Tensor & amax_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CPU_amax_out_out(self, dim, keepdim, out);
}
at::Tensor & amax_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
return wrapper_CPU_amax_out_out(self, dim, keepdim, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_mean_out_functional final : public at::native::structured_mean_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_mean_dim(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
structured_mean_out_functional op;
op.meta(self, dim, keepdim, dtype);
op.impl(self, dim, keepdim, dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_mean_out_out final : public at::native::structured_mean_out {
    structured_mean_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call follows here: unlike the TensorIterator-backed kernels
        // in this file, this kernel's meta base class provides no
        // set_output_raw_strided of its own to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_mean_out_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
structured_mean_out_out op(out);
op.meta(self, dim, keepdim, dtype);
op.impl(self, dim, keepdim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("mean.dim", TORCH_FN(wrapper_CPU_mean_dim));
  m.impl("mean.out", TORCH_FN(wrapper_CPU_mean_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_mean_dim(self, dim, keepdim, dtype);
}
at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_CPU_mean_out_out(self, dim, keepdim, dtype, out);
}
at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CPU_mean_out_out(self, dim, keepdim, dtype, out);
}
} // namespace cpu
} // namespace at
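// Note (illustrative): for mean.dim the optional `dtype` is consumed in
// meta(), which uses it to choose the output dtype before the output tensor is
// allocated, so the out-of-place call can produce a different dtype directly:
//
//   at::Tensor x = at::rand({2, 3});
//   at::Tensor m = at::mean(x, at::IntArrayRef{0}, /*keepdim=*/false, at::kDouble);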
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__nanmedian(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmedian_cpu(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("nanmedian", TORCH_FN(wrapper_CPU__nanmedian));
}
} // anonymous namespace
namespace cpu {
at::Tensor nanmedian(const at::Tensor & self) {
return wrapper_CPU__nanmedian(self);
}
} // namespace cpu
} // namespace at
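// Ops like nanmedian above use the simpler unstructured wrapper shape: no
// meta()/impl() split and no output machinery, just a direct forward to the
// at::native:: implementation. The "No device check" / "DeviceGuard omitted"
// comments record per-operator codegen choices (the device_check /
// device_guard settings from native_functions.yaml). Usage (illustrative):
//
//   at::Tensor med = at::nanmedian(at::rand({5}));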
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_dim_values_nanmedian_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmedian_out_cpu(self, dim, keepdim, values, indices);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("nanmedian.dim_values", TORCH_FN(wrapper_CPU_dim_values_nanmedian_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
return wrapper_CPU_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___int_mm(const at::Tensor & self, const at::Tensor & mat2) {
  // No device check
  // DeviceGuard omitted
  return at::native::_int_mm_cpu(self, mat2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out__int_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_int_mm_out_cpu(self, mat2, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("_int_mm", TORCH_FN(wrapper_CPU___int_mm));
  m.impl("_int_mm.out", TORCH_FN(wrapper_CPU_out__int_mm_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _int_mm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU___int_mm(self, mat2);
}
at::Tensor & _int_mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU_out__int_mm_out(self, mat2, out);
}
at::Tensor & _int_mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_CPU_out__int_mm_out(self, mat2, out);
}
} // namespace cpu
} // namespace at
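// Illustrative usage of the _int_mm wrappers above (assumes int8 inputs;
// _int_mm is an int8 x int8 -> int32 matrix multiply, and backends may impose
// extra shape constraints):
//
//   at::Tensor a = at::randint(-128, 128, {8, 16}, at::kChar);
//   at::Tensor b = at::randint(-128, 128, {16, 8}, at::kChar);
//   at::Tensor c = at::_int_mm(a, b);  // result dtype is kInt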
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___dyn_quant_pack_4bit_weight(const at::Tensor & weights, const at::Tensor & scales_zeros, const ::std::optional<at::Tensor> & bias, int64_t block_size, int64_t in_features, int64_t out_features) {
  // No device check
  // DeviceGuard omitted
  return at::native::_dyn_quant_pack_4bit_weight_cpu(weights, scales_zeros, bias, block_size, in_features, out_features);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("_dyn_quant_pack_4bit_weight", TORCH_FN(wrapper_CPU___dyn_quant_pack_4bit_weight));
}
} // anonymous namespace
namespace cpu {
at::Tensor _dyn_quant_pack_4bit_weight(const at::Tensor & weights, const at::Tensor & scales_zeros, const ::std::optional<at::Tensor> & bias, int64_t block_size, int64_t in_features, int64_t out_features) {
return wrapper_CPU___dyn_quant_pack_4bit_weight(weights, scales_zeros, bias, block_size, in_features, out_features);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__mode(const at::Tensor & self, int64_t dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::mode(self, dim, keepdim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("mode", TORCH_FN(wrapper_CPU__mode));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU__mode(self, dim, keepdim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  // No device check
  // DeviceGuard omitted
  return at::native::narrow_copy_dense_cpu(self, dim, start.guard_int(__FILE__, __LINE__), length.guard_int(__FILE__, __LINE__));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_narrow_copy_out(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::narrow_copy_dense_cpu_out(self, dim, start.guard_int(__FILE__, __LINE__), length.guard_int(__FILE__, __LINE__), out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("narrow_copy", TORCH_FN(wrapper_CPU__narrow_copy));
  m.impl("narrow_copy.out", TORCH_FN(wrapper_CPU_out_narrow_copy_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
return wrapper_CPU__narrow_copy(self, dim, start, length);
}
at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
return wrapper_CPU__narrow_copy(self, dim, start, length);
}
at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
}
at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) {
return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
}
at::Tensor & narrow_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
}
at::Tensor & narrow_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
}
} // namespace cpu
} // namespace at
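// Note (illustrative): the SymInt plumbing above lets symbolic sizes from
// tracing/compile flow into narrow_copy; on the eager CPU path,
// guard_int(__FILE__, __LINE__) materializes each SymInt to a concrete int64_t
// (installing a guard when the value is symbolic). The int64_t and SymInt
// entry points above converge on the same wrapper:
//
//   at::Tensor x = at::rand({10});
//   at::Tensor y = at::cpu::narrow_copy(x, /*dim=*/0, /*start=*/2, /*length=*/4);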
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm_backward_cpu(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("native_batch_norm_backward", TORCH_FN(wrapper_CPU__native_batch_norm_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
return wrapper_CPU__native_batch_norm_backward(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___pdist_forward(const at::Tensor & self, double p) {
  // No device check
  // DeviceGuard omitted
  return at::native::_pdist_forward(self, p);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("_pdist_forward", TORCH_FN(wrapper_CPU___pdist_forward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _pdist_forward(const at::Tensor & self, double p) {
return wrapper_CPU___pdist_forward(self, p);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
  // No device check
  // DeviceGuard omitted
  return at::native::_pdist_backward(grad, self, p, pdist);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("_pdist_backward", TORCH_FN(wrapper_CPU___pdist_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
return wrapper_CPU___pdist_backward(grad, self, p, pdist);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::pixel_shuffle_cpu(self, upscale_factor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("pixel_shuffle", TORCH_FN(wrapper_CPU__pixel_shuffle));
}
} // anonymous namespace
namespace cpu {
at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) {
return wrapper_CPU__pixel_shuffle(self, upscale_factor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_neg_out_functional final : public at::native::structured_neg_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_neg(const at::Tensor & self) {
structured_neg_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_neg_out_out final : public at::native::structured_neg_out {
    structured_neg_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_neg_out_out(const at::Tensor & self, at::Tensor & out) {
structured_neg_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_neg_out_inplace final : public at::native::structured_neg_out {
    structured_neg_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_neg_(at::Tensor & self) {
structured_neg_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("neg", TORCH_FN(wrapper_CPU_neg));
  m.impl("neg.out", TORCH_FN(wrapper_CPU_neg_out_out));
  m.impl("neg_", TORCH_FN(wrapper_CPU_neg_));
}
} // anonymous namespace
namespace cpu {
at::Tensor neg(const at::Tensor & self) {
return wrapper_CPU_neg(self);
}
at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_neg_out_out(self, out);
}
at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_neg_out_out(self, out);
}
at::Tensor & neg_(at::Tensor & self) {
return wrapper_CPU_neg_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor_repeat_interleave(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::repeat_interleave_cpu(repeats, output_size.has_value() ? ::std::make_optional(output_size->guard_int(__FILE__, __LINE__)) : ::std::nullopt);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("repeat_interleave.Tensor", TORCH_FN(wrapper_CPU_Tensor_repeat_interleave));
}
} // anonymous namespace
namespace cpu {
at::Tensor repeat_interleave(const at::Tensor & repeats, ::std::optional<int64_t> output_size) {
return wrapper_CPU_Tensor_repeat_interleave(repeats, output_size.has_value() ? ::std::make_optional(c10::SymInt(*output_size)) : ::std::nullopt);
}
at::Tensor repeat_interleave_symint(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size) {
return wrapper_CPU_Tensor_repeat_interleave(repeats, output_size);
}
} // namespace cpu
} // namespace at
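// Note (illustrative): repeat_interleave.Tensor takes only the `repeats`
// tensor and returns the repeated index vector; `output_size`, when supplied
// (e.g. by upstream code that already knows repeats.sum()), spares the kernel
// from sizing the output itself:
//
//   at::Tensor r   = at::arange(1, 4);          // repeats = [1, 2, 3]
//   at::Tensor idx = at::repeat_interleave(r);  // [0, 1, 1, 2, 2, 2]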
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
  // No device check
  // DeviceGuard omitted
  return at::native::_prelu_kernel(self, weight);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("_prelu_kernel", TORCH_FN(wrapper_CPU___prelu_kernel));
}
} // anonymous namespace
namespace cpu {
at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
return wrapper_CPU___prelu_kernel(self, weight);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_gelu_out_cpu_functional final : public at::native::structured_gelu_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_gelu(const at::Tensor & self, c10::string_view approximate) {
structured_gelu_out_cpu_functional op;
op.meta(self, approximate);
op.impl(self, approximate, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_gelu_out_cpu_out final : public at::native::structured_gelu_out_cpu {
    structured_gelu_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_gelu_out_out(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
structured_gelu_out_cpu_out op(out);
op.meta(self, approximate);
op.impl(self, approximate, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_gelu_out_cpu_inplace final : public at::native::structured_gelu_out_cpu {
    structured_gelu_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_gelu_(at::Tensor & self, c10::string_view approximate) {
structured_gelu_out_cpu_inplace op(self);
op.meta(self, approximate);
op.impl(self, approximate, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
  m.impl("gelu", TORCH_FN(wrapper_CPU_gelu));
  m.impl("gelu.out", TORCH_FN(wrapper_CPU_gelu_out_out));
  m.impl("gelu_", TORCH_FN(wrapper_CPU_gelu_));
}
} // anonymous namespace
namespace cpu {
at::Tensor gelu(const at::Tensor & self, c10::string_view approximate) {
return wrapper_CPU_gelu(self, approximate);
}
at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate) {
return wrapper_CPU_gelu_out_out(self, approximate, out);
}
at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
return wrapper_CPU_gelu_out_out(self, approximate, out);
}
at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate) {
return wrapper_CPU_gelu_(self, approximate);
}
} // namespace cpu
} // namespace at
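// Illustrative usage of the gelu wrappers above: `approximate` selects the
// exact erf-based formula ("none", the default) or the tanh approximation:
//
//   at::Tensor x  = at::rand({4});
//   at::Tensor y0 = at::gelu(x);          // approximate = "none"
//   at::Tensor y1 = at::gelu(x, "tanh");  // tanh approximation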
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_mish_out_functional final : public at::native::structured_mish_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_mish(const at::Tensor & self) {
structured_mish_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_mish_out_out final : public at::native::structured_mish_out {
    structured_mish_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_mish_out_out(const at::Tensor & self, at::Tensor & out) {
structured_mish_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_mish_out_inplace final : public at::native::structured_mish_out {
    structured_mish_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_mish_(at::Tensor & self) {
structured_mish_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mish", TORCH_FN(wrapper_CPU_mish));
m.impl("mish.out", TORCH_FN(wrapper_CPU_mish_out_out));
m.impl("mish_", TORCH_FN(wrapper_CPU_mish_));
}
} // anonymous namespace
namespace cpu {
at::Tensor mish(const at::Tensor & self) {
return wrapper_CPU_mish(self);
}
at::Tensor & mish_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_mish_out_out(self, out);
}
at::Tensor & mish_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_mish_out_out(self, out);
}
at::Tensor & mish_(at::Tensor & self) {
return wrapper_CPU_mish_(self);
}
} // namespace cpu
} // namespace at
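// Illustrative sketch (editorial addition, not generated by torchgen): the
// at::cpu:: entry points defined above call the CPU kernels directly, without
// going through the dispatcher. `x` and `out` are hypothetical caller tensors.
[[maybe_unused]] static void example_cpu_mish(at::Tensor& x, at::Tensor& out) {
  [[maybe_unused]] at::Tensor y = at::cpu::mish(x);  // functional: fresh output
  at::cpu::mish_outf(x, out);                        // out=: writes into `out`
  at::cpu::mish_(x);                                 // in-place: overwrites `x`
}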
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::mish_backward(grad_output, self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mish_backward",
TORCH_FN(wrapper_CPU__mish_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU__mish_backward(grad_output, self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_softmax_backward_cpu_out_functional final : public at::native::structured_softmax_backward_cpu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
structured_softmax_backward_cpu_out_functional op;
op.meta(grad_output, output, dim, input_dtype);
op.impl(grad_output, output, dim, input_dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_softmax_backward_cpu_out_out final : public at::native::structured_softmax_backward_cpu_out {
    structured_softmax_backward_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__softmax_backward_data_out_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
structured_softmax_backward_cpu_out_out op(grad_input);
op.meta(grad_output, output, dim, input_dtype);
op.impl(grad_output, output, dim, input_dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_softmax_backward_data", TORCH_FN(wrapper_CPU__softmax_backward_data));
m.impl("_softmax_backward_data.out", TORCH_FN(wrapper_CPU__softmax_backward_data_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
return wrapper_CPU__softmax_backward_data(grad_output, output, dim, input_dtype);
}
at::Tensor & _softmax_backward_data_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
return wrapper_CPU__softmax_backward_data_out_out(grad_output, output, dim, input_dtype, grad_input);
}
at::Tensor & _softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
return wrapper_CPU__softmax_backward_data_out_out(grad_output, output, dim, input_dtype, grad_input);
}
} // namespace cpu
} // namespace at
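// Illustrative sketch (editorial addition): every out= kernel is exposed under
// two spellings -- `_out` takes the output tensor(s) first, `_outf` takes them
// last, in schema order -- and both forward to the same wrapper. Hypothetical
// caller tensors below.
[[maybe_unused]] static void example_softmax_backward_out(
    at::Tensor& grad_input, const at::Tensor& grad_output, const at::Tensor& output) {
  at::cpu::_softmax_backward_data_out(grad_input, grad_output, output, /*dim=*/0, at::kFloat);
  at::cpu::_softmax_backward_data_outf(grad_output, output, /*dim=*/0, at::kFloat, grad_input);
}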
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_tanh_out_functional final : public at::native::structured_tanh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_tanh(const at::Tensor & self) {
structured_tanh_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_tanh_out_out final : public at::native::structured_tanh_out {
    structured_tanh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_tanh_out_out(const at::Tensor & self, at::Tensor & out) {
structured_tanh_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_tanh_out_inplace final : public at::native::structured_tanh_out {
    structured_tanh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_tanh_(at::Tensor & self) {
structured_tanh_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("tanh", TORCH_FN(wrapper_CPU_tanh));
m.impl("tanh.out", TORCH_FN(wrapper_CPU_tanh_out_out));
m.impl("tanh_", TORCH_FN(wrapper_CPU_tanh_));
}
} // anonymous namespace
namespace cpu {
at::Tensor tanh(const at::Tensor & self) {
return wrapper_CPU_tanh(self);
}
at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_tanh_out_out(self, out);
}
at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_tanh_out_out(self, out);
}
at::Tensor & tanh_(at::Tensor & self) {
return wrapper_CPU_tanh_(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__roll(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) {
  // No device check
  // DeviceGuard omitted
  return at::native::roll(self, C10_AS_INTARRAYREF_SLOW(shifts), dims);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("roll",
TORCH_FN(wrapper_CPU__roll));
}
} // anonymous namespace
namespace cpu {
at::Tensor roll(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
return wrapper_CPU__roll(self, c10::fromIntArrayRefSlow(shifts), dims);
}
at::Tensor roll_symint(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) {
return wrapper_CPU__roll(self, shifts, dims);
}
} // namespace cpu
} // namespace at
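// Illustrative sketch (editorial addition): ops whose schema takes SymInt get
// two entry points. The plain-int overload widens with c10::fromIntArrayRefSlow();
// the wrapper then narrows again with C10_AS_INTARRAYREF_SLOW before calling
// the int-only native kernel. `x` is a hypothetical tensor.
[[maybe_unused]] static void example_cpu_roll(const at::Tensor& x) {
  at::cpu::roll(x, /*shifts=*/{1}, /*dims=*/{0});           // int sizes
  at::cpu::roll_symint(x, {c10::SymInt(1)}, /*dims=*/{0});  // symbolic sizes
}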
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
  // No device check
  // DeviceGuard omitted
  return at::native::_nested_view_from_buffer(self, nested_size, nested_strides, offsets);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_nested_view_from_buffer",
TORCH_FN(wrapper_CPU___nested_view_from_buffer));
}
} // anonymous namespace
namespace cpu {
at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
return wrapper_CPU___nested_view_from_buffer(self, nested_size, nested_strides, offsets);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___unique(const at::Tensor & self, bool sorted, bool return_inverse) {
  // No device check
  // DeviceGuard omitted
  return at::native::_unique_cpu(self, sorted, return_inverse);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_unique",
TORCH_FN(wrapper_CPU___unique));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted, bool return_inverse) {
return wrapper_CPU___unique(self, sorted, return_inverse);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___unique2(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
  // No device check
  // DeviceGuard omitted
  return at::native::_unique2_cpu(self, sorted, return_inverse, return_counts);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_unique2",
TORCH_FN(wrapper_CPU___unique2));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
return wrapper_CPU___unique2(self, sorted, return_inverse, return_counts);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::weight_norm_cpu(v, g, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_weight_norm_interface",
TORCH_FN(wrapper_CPU___weight_norm_interface));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
return wrapper_CPU___weight_norm_interface(v, g, dim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___efficientzerotensor(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_efficientzerotensor(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_efficientzerotensor",
TORCH_FN(wrapper_CPU___efficientzerotensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CPU___efficientzerotensor(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _efficientzerotensor(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
return wrapper_CPU___efficientzerotensor(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options) {
return wrapper_CPU___efficientzerotensor(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
return wrapper_CPU___efficientzerotensor(size, dtype, layout, device, pin_memory);
}
} // namespace cpu
} // namespace at
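// Illustrative sketch (editorial addition): factory-style ops additionally get
// a packed TensorOptions overload next to the unpacked optional-by-optional
// form; both route to the same wrapper.
[[maybe_unused]] static void example_cpu_efficientzerotensor() {
  at::cpu::_efficientzerotensor({2, 3}, at::TensorOptions().dtype(at::kFloat));
  at::cpu::_efficientzerotensor({2, 3}, at::kFloat, std::nullopt, std::nullopt, std::nullopt);
}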
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___standard_gamma(const at::Tensor & self, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::_s_gamma_cpu(self, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_standard_gamma",
TORCH_FN(wrapper_CPU___standard_gamma));
}
} // anonymous namespace
namespace cpu {
at::Tensor _standard_gamma(const at::Tensor & self, ::std::optional<at::Generator> generator) {
return wrapper_CPU___standard_gamma(self, generator);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
  // No device check
  // DeviceGuard omitted
  return at::native::_dirichlet_grad_cpu(x, alpha, total);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_dirichlet_grad",
TORCH_FN(wrapper_CPU___dirichlet_grad));
}
} // anonymous namespace
namespace cpu {
at::Tensor _dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
return wrapper_CPU___dirichlet_grad(x, alpha, total);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_out__batch_norm_with_update_out(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_with_update_cpu_out(input, weight, bias, running_mean, running_var, momentum, eps, out, save_mean, save_invstd, reserve);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___batch_norm_with_update(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_with_update_cpu(input, weight, bias, running_mean, running_var, momentum, eps);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_batch_norm_with_update.out",
TORCH_FN(wrapper_CPU_out__batch_norm_with_update_out));
m.impl("_batch_norm_with_update",
TORCH_FN(wrapper_CPU___batch_norm_with_update));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
return wrapper_CPU_out__batch_norm_with_update_out(input, weight, bias, running_mean, running_var, momentum, eps, out, save_mean, save_invstd, reserve);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve) {
return wrapper_CPU_out__batch_norm_with_update_out(input, weight, bias, running_mean, running_var, momentum, eps, out, save_mean, save_invstd, reserve);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
return wrapper_CPU___batch_norm_with_update(input, weight, bias, running_mean, running_var, momentum, eps);
}
} // namespace cpu
} // namespace at
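// Editorial note: unlike most ops in this file, _batch_norm_with_update takes
// running_mean and running_var by mutable reference in both variants -- the
// kernel updates the running statistics in place instead of returning them.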
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_norm_dtype_out_functional final : public at::native::structured_norm_dtype_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_norm_ScalarOpt_dim_dtype(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
structured_norm_dtype_out_functional op;
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_norm_dtype_out_out final : public at::native::structured_norm_dtype_out {
    structured_norm_dtype_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_norm_out_dtype_out(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
structured_norm_dtype_out_out op(out);
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("norm.ScalarOpt_dim_dtype", TORCH_FN(wrapper_CPU_norm_ScalarOpt_dim_dtype));
m.impl("norm.dtype_out", TORCH_FN(wrapper_CPU_norm_out_dtype_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
return wrapper_CPU_norm_ScalarOpt_dim_dtype(self, p, dim, keepdim, dtype);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
return wrapper_CPU_norm_out_dtype_out(self, p, dim, keepdim, dtype, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
return wrapper_CPU_norm_out_dtype_out(self, p, dim, keepdim, dtype, out);
}
} // namespace cpu
} // namespace at
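// Editorial note: the ternaries in the wrappers above adapt the public
// std::optional<Scalar> argument to the borrowed at::OptionalScalarRef form
// the structured kernel expects -- when `p` is set they pass a pointer into
// the caller-owned optional, so no Scalar is copied.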
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_norm_out_functional final : public at::native::structured_norm_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_norm_ScalarOpt_dim(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
structured_norm_out_functional op;
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_norm_out_out final : public at::native::structured_norm_out {
    structured_norm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_norm_out_out(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
structured_norm_out_out op(out);
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("norm.ScalarOpt_dim", TORCH_FN(wrapper_CPU_norm_ScalarOpt_dim));
m.impl("norm.out", TORCH_FN(wrapper_CPU_norm_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
return wrapper_CPU_norm_ScalarOpt_dim(self, p, dim, keepdim);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
return wrapper_CPU_norm_out_out(self, p, dim, keepdim, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
return wrapper_CPU_norm_out_out(self, p, dim, keepdim, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_Tensor_out_frexp_out(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
  // No device check
  // DeviceGuard omitted
  return at::native::frexp_out(self, mantissa, exponent);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("frexp.Tensor_out",
TORCH_FN(wrapper_CPU_Tensor_out_frexp_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self) {
return wrapper_CPU_Tensor_out_frexp_out(self, mantissa, exponent);
}
::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
return wrapper_CPU_Tensor_out_frexp_out(self, mantissa, exponent);
}
} // namespace cpu
} // namespace at
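// Illustrative sketch (editorial addition): multi-output out= kernels follow
// the same `_out`/`_outf` convention and return a tuple of references to the
// caller's tensors. Hypothetical tensors below.
[[maybe_unused]] static void example_cpu_frexp(
    const at::Tensor& x, at::Tensor& mantissa, at::Tensor& exponent) {
  at::cpu::frexp_out(mantissa, exponent, x);   // out tensors first
  at::cpu::frexp_outf(x, mantissa, exponent);  // out tensors last (schema order)
}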
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_addmm_activation_out_cpu_functional final : public at::native::structured_addmm_activation_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
structured_addmm_activation_out_cpu_functional op;
op.meta(self, mat1, mat2, beta, alpha, use_gelu);
op.impl(self, mat1, mat2, beta, alpha, use_gelu, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_addmm_activation_out_cpu_out final : public at::native::structured_addmm_activation_out_cpu {
    structured_addmm_activation_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: this op's base class has no set_output bookkeeping to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__addmm_activation_out_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
structured_addmm_activation_out_cpu_out op(out);
op.meta(self, mat1, mat2, beta, alpha, use_gelu);
op.impl(self, mat1, mat2, beta, alpha, use_gelu, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_addmm_activation", TORCH_FN(wrapper_CPU__addmm_activation));
m.impl("_addmm_activation.out", TORCH_FN(wrapper_CPU__addmm_activation_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
return wrapper_CPU__addmm_activation(self, mat1, mat2, beta, alpha, use_gelu);
}
at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
return wrapper_CPU__addmm_activation_out_out(self, mat1, mat2, beta, alpha, use_gelu, out);
}
at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
return wrapper_CPU__addmm_activation_out_out(self, mat1, mat2, beta, alpha, use_gelu, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_dynamic(self, dtype, reduce_range);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("quantize_per_tensor_dynamic",
TORCH_FN(wrapper_CPU__quantize_per_tensor_dynamic));
}
} // anonymous namespace
namespace cpu {
at::Tensor quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
return wrapper_CPU__quantize_per_tensor_dynamic(self, dtype, reduce_range);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor(self, scale, zero_point, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("quantize_per_tensor",
TORCH_FN(wrapper_CPU__quantize_per_tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
return wrapper_CPU__quantize_per_tensor(self, scale, zero_point, dtype);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_tensor_qparams_quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_tensor_qparams(self, scale, zero_point, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("quantize_per_tensor.tensor_qparams",
TORCH_FN(wrapper_CPU_tensor_qparams_quantize_per_tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
return wrapper_CPU_tensor_qparams_quantize_per_tensor(self, scale, zero_point, dtype);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::vector<at::Tensor> wrapper_CPU_tensors_quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_list_cpu(tensors, scales, zero_points, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("quantize_per_tensor.tensors",
TORCH_FN(wrapper_CPU_tensors_quantize_per_tensor));
}
} // anonymous namespace
namespace cpu {
::std::vector<at::Tensor> quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
return wrapper_CPU_tensors_quantize_per_tensor(tensors, scales, zero_points, dtype);
}
} // namespace cpu
} // namespace at
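// Illustrative sketch (editorial addition): each schema overload of
// quantize_per_tensor becomes one C++ overload in at::cpu::, so the overload
// set accumulates across the sections above. `x`, `scale_t`, `zp_t` are
// hypothetical tensors (scale_t floating point, zp_t integral).
[[maybe_unused]] static void example_cpu_quantize_per_tensor(
    const at::Tensor& x, const at::Tensor& scale_t, const at::Tensor& zp_t) {
  at::cpu::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/0, at::kQInt8);
  at::cpu::quantize_per_tensor(x, scale_t, zp_t, at::kQInt8);
}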
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_channel(self, scales, zero_points, axis, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("quantize_per_channel",
TORCH_FN(wrapper_CPU__quantize_per_channel));
}
} // anonymous namespace
namespace cpu {
at::Tensor quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
return wrapper_CPU__quantize_per_channel(self, scales, zero_points, axis, dtype);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___make_per_channel_quantized_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
  // No device check
  // DeviceGuard omitted
  return at::native::make_per_channel_quantized_tensor_cpu(self, scale, zero_point, axis);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_make_per_channel_quantized_tensor",
TORCH_FN(wrapper_CPU___make_per_channel_quantized_tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor _make_per_channel_quantized_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
return wrapper_CPU___make_per_channel_quantized_tensor(self, scale, zero_point, axis);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine_cachemask(self, scale, zero_point, quant_min, quant_max);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fake_quantize_per_tensor_affine_cachemask",
TORCH_FN(wrapper_CPU__fake_quantize_per_tensor_affine_cachemask));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
return wrapper_CPU__fake_quantize_per_tensor_affine_cachemask(self, scale, zero_point, quant_min, quant_max);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
TORCH_FN(wrapper_CPU___fake_quantize_per_tensor_affine_cachemask_tensor_qparams));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
return wrapper_CPU___fake_quantize_per_tensor_affine_cachemask_tensor_qparams(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___fake_quantize_learnable_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_tensor_affine(self, scale, zero_point, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fake_quantize_learnable_per_tensor_affine",
TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_tensor_affine));
}
} // anonymous namespace
namespace cpu {
at::Tensor _fake_quantize_learnable_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
return wrapper_CPU___fake_quantize_learnable_per_tensor_affine(self, scale, zero_point, quant_min, quant_max, grad_factor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_tensor_affine_backward(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fake_quantize_learnable_per_tensor_affine_backward",
TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_tensor_affine_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
return wrapper_CPU___fake_quantize_learnable_per_tensor_affine_backward(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_channel_affine_cachemask(self, scale, zero_point, axis, quant_min, quant_max);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fake_quantize_per_channel_affine_cachemask",
TORCH_FN(wrapper_CPU__fake_quantize_per_channel_affine_cachemask));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
return wrapper_CPU__fake_quantize_per_channel_affine_cachemask(self, scale, zero_point, axis, quant_min, quant_max);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___fake_quantize_learnable_per_channel_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_channel_affine_backward(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fake_quantize_learnable_per_channel_affine_backward",
TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_channel_affine_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
return wrapper_CPU___fake_quantize_learnable_per_channel_affine_backward(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_softmax_backward_cpu(grad_output, output, mask, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_masked_softmax_backward",
TORCH_FN(wrapper_CPU___masked_softmax_backward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim) {
return wrapper_CPU___masked_softmax_backward(grad_output, output, mask, dim);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__view(const at::Tensor & self, c10::SymIntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::view(self, C10_AS_INTARRAYREF_SLOW(size));
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("view",
TORCH_FN(wrapper_CPU__view));
}
} // anonymous namespace
namespace cpu {
at::Tensor view(const at::Tensor & self, at::IntArrayRef size) {
return wrapper_CPU__view(self, c10::fromIntArrayRefSlow(size));
}
at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
return wrapper_CPU__view(self, size);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU__put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
    // No device check
  // DeviceGuard omitted
  return at::native::put_(self, index, source, accumulate);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("put_",
TORCH_FN(wrapper_CPU__put_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
return wrapper_CPU__put_(self, index, source, accumulate);
}
} // namespace cpu
} // namespace at
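// NOTE: an illustrative sketch. put_ is registered only in its mutating form;
// with accumulate=true, repeated indices add rather than overwrite. Assuming
// <ATen/ATen.h>:
//
//   at::Tensor t   = at::zeros({2, 3});
//   at::Tensor idx = at::tensor({0, 0}, at::kLong);
//   at::Tensor src = at::tensor({1.0f, 2.0f});
//   at::cpu::put_(t, idx, src, /*accumulate=*/true);  // t[0][0] is now 3.0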
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_index_add_cpu_out_functional final : public at::native::structured_index_add_cpu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
structured_index_add_cpu_out_functional op;
auto precompute = op.meta(self, dim, index, source, alpha);
(void)precompute;
op.impl(self, precompute.dim, index, source, alpha, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_index_add_cpu_out_out final : public at::native::structured_index_add_cpu_out {
    structured_index_add_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_index_add_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
structured_index_add_cpu_out_out op(out);
auto precompute = op.meta(self, dim, index, source, alpha);
(void)precompute;
op.impl(self, precompute.dim, index, source, alpha, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_index_add_cpu_out_inplace final : public at::native::structured_index_add_cpu_out {
    structured_index_add_cpu_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
structured_index_add_cpu_out_inplace op(self);
auto precompute = op.meta(self, dim, index, source, alpha);
(void)precompute;
op.impl(self, precompute.dim, index, source, alpha, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("index_add", TORCH_FN(wrapper_CPU_index_add));
m.impl("index_add.out", TORCH_FN(wrapper_CPU_index_add_out_out));
m.impl("index_add_", TORCH_FN(wrapper_CPU_index_add_));
}
} // anonymous namespace
namespace cpu {
at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
return wrapper_CPU_index_add(self, dim, index, source, alpha);
}
at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
return wrapper_CPU_index_add_out_out(self, dim, index, source, alpha, out);
}
at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_index_add_out_out(self, dim, index, source, alpha, out);
}
at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
return wrapper_CPU_index_add_(self, dim, index, source, alpha);
}
} // namespace cpu
} // namespace at
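// NOTE: an illustrative sketch of the three bindings generated above from one
// structured kernel. The functional struct allocates its output with create_out,
// the out= struct resizes the caller's tensor (copying back through a proxy when
// it cannot be written directly), and the inplace struct validates self with
// check_inplace. The two out= conveniences differ only in argument order:
//
//   at::Tensor r = at::cpu::index_add(self, 0, index, source, /*alpha=*/1);
//   at::cpu::index_add_out(out, self, 0, index, source, 1);   // out first
//   at::cpu::index_add_outf(self, 0, index, source, 1, out);  // out last
//   at::cpu::index_add_(self, 0, index, source, 1);           // in-place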
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_int_Scalar_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_fill_(self, dim, index, value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("index_fill_.int_Scalar",
TORCH_FN(wrapper_CPU_int_Scalar_index_fill_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CPU_int_Scalar_index_fill_(self, dim, index, value);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_int_Tensor_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_fill_(self, dim, index, value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("index_fill_.int_Tensor",
TORCH_FN(wrapper_CPU_int_Tensor_index_fill_));
}
} // anonymous namespace
namespace cpu {
at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
return wrapper_CPU_int_Tensor_index_fill_(self, dim, index, value);
}
} // namespace cpu
} // namespace at
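// NOTE: an illustrative sketch. The two index_fill_ registrations above differ
// only in the type of value (Scalar vs. 0-dim Tensor); ordinary C++ overload
// resolution selects between the identically named at::cpu:: entry points:
//
//   at::cpu::index_fill_(t, 0, idx, 5.0);                      // .int_Scalar
//   at::cpu::index_fill_(t, 0, idx, at::scalar_tensor(5.0));   // .int_Tensor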
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_scatter_src_out_functional final : public at::native::structured_scatter_src_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_scatter_src(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_src_out_functional op;
op.meta(self, dim, index, src);
op.impl(self, dim, index, src, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_scatter_src_out_out final : public at::native::structured_scatter_src_out {
    structured_scatter_src_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_out_src_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
structured_scatter_src_out_out op(out);
op.meta(self, dim, index, src);
op.impl(self, dim, index, src, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_scatter_src_out_inplace final : public at::native::structured_scatter_src_out {
    structured_scatter_src_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter__src(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_src_out_inplace op(self);
op.meta(self, dim, index, src);
op.impl(self, dim, index, src, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("scatter.src", TORCH_FN(wrapper_CPU_scatter_src));
m.impl("scatter.src_out", TORCH_FN(wrapper_CPU_scatter_out_src_out));
m.impl("scatter_.src", TORCH_FN(wrapper_CPU_scatter__src));
}
} // anonymous namespace
namespace cpu {
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CPU_scatter_src(self, dim, index, src);
}
at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CPU_scatter_out_src_out(self, dim, index, src, out);
}
at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
return wrapper_CPU_scatter_out_src_out(self, dim, index, src, out);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CPU_scatter__src(self, dim, index, src);
}
} // namespace cpu
} // namespace at
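// NOTE: an illustrative sketch of the out= machinery above. resize_out brings
// the caller's tensor to the computed sizes; when out still cannot receive the
// result directly (maybe_create_proxy returns a value), the kernel writes into
// the proxy and the wrapper copies it back with out.copy_(proxy). From the
// caller's side this is transparent:
//
//   at::Tensor out = at::empty({0});   // wrong size; resized before the kernel runs
//   at::cpu::scatter_outf(self, 0, index, src, out);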
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_scatter_value_out_functional final : public at::native::structured_scatter_value_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_scatter_value(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
structured_scatter_value_out_functional op;
op.meta(self, dim, index, value);
op.impl(self, dim, index, value, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_scatter_value_out_out final : public at::native::structured_scatter_value_out {
    structured_scatter_value_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_out_value_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
structured_scatter_value_out_out op(out);
op.meta(self, dim, index, value);
op.impl(self, dim, index, value, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_scatter_value_out_inplace final : public at::native::structured_scatter_value_out {
    structured_scatter_value_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter__value(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
structured_scatter_value_out_inplace op(self);
op.meta(self, dim, index, value);
op.impl(self, dim, index, value, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("scatter.value", TORCH_FN(wrapper_CPU_scatter_value));
m.impl("scatter.value_out", TORCH_FN(wrapper_CPU_scatter_out_value_out));
m.impl("scatter_.value", TORCH_FN(wrapper_CPU_scatter__value));
}
} // anonymous namespace
namespace cpu {
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CPU_scatter_value(self, dim, index, value);
}
at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CPU_scatter_out_value_out(self, dim, index, value, out);
}
at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
return wrapper_CPU_scatter_out_value_out(self, dim, index, value, out);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CPU_scatter__value(self, dim, index, value);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_scatter_reduce_out_functional final : public at::native::structured_scatter_reduce_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
structured_scatter_reduce_out_functional op;
op.meta(self, dim, index, src, reduce);
op.impl(self, dim, index, src, reduce, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_scatter_reduce_out_out final : public at::native::structured_scatter_reduce_out {
    structured_scatter_reduce_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_out_reduce_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
structured_scatter_reduce_out_out op(out);
op.meta(self, dim, index, src, reduce);
op.impl(self, dim, index, src, reduce, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_scatter_reduce_out_inplace final : public at::native::structured_scatter_reduce_out {
    structured_scatter_reduce_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter__reduce(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
structured_scatter_reduce_out_inplace op(self);
op.meta(self, dim, index, src, reduce);
op.impl(self, dim, index, src, reduce, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("scatter.reduce", TORCH_FN(wrapper_CPU_scatter_reduce));
m.impl("scatter.reduce_out", TORCH_FN(wrapper_CPU_scatter_out_reduce_out));
m.impl("scatter_.reduce", TORCH_FN(wrapper_CPU_scatter__reduce));
}
} // anonymous namespace
namespace cpu {
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
return wrapper_CPU_scatter_reduce(self, dim, index, src, reduce);
}
at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
return wrapper_CPU_scatter_out_reduce_out(self, dim, index, src, reduce, out);
}
at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
return wrapper_CPU_scatter_out_reduce_out(self, dim, index, src, reduce, out);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
return wrapper_CPU_scatter__reduce(self, dim, index, src, reduce);
}
} // namespace cpu
} // namespace at
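// NOTE: an illustrative sketch. The reduce variant threads a string selector
// through meta and impl; for this overload the accepted values are "add" and
// "multiply". Assuming index and src are shaped for dim 0:
//
//   at::Tensor r = at::cpu::scatter(self, 0, index, src, "add");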
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_scatter_value_reduce_out_functional final : public at::native::structured_scatter_value_reduce_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_scatter_value_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
structured_scatter_value_reduce_out_functional op;
op.meta(self, dim, index, value, reduce);
op.impl(self, dim, index, value, reduce, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_scatter_value_reduce_out_out final : public at::native::structured_scatter_value_reduce_out {
    structured_scatter_value_reduce_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_out_value_reduce_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
structured_scatter_value_reduce_out_out op(out);
op.meta(self, dim, index, value, reduce);
op.impl(self, dim, index, value, reduce, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_scatter_value_reduce_out_inplace final : public at::native::structured_scatter_value_reduce_out {
    structured_scatter_value_reduce_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter__value_reduce(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
structured_scatter_value_reduce_out_inplace op(self);
op.meta(self, dim, index, value, reduce);
op.impl(self, dim, index, value, reduce, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("scatter.value_reduce", TORCH_FN(wrapper_CPU_scatter_value_reduce));
m.impl("scatter.value_reduce_out", TORCH_FN(wrapper_CPU_scatter_out_value_reduce_out));
m.impl("scatter_.value_reduce", TORCH_FN(wrapper_CPU_scatter__value_reduce));
}
} // anonymous namespace
namespace cpu {
at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
return wrapper_CPU_scatter_value_reduce(self, dim, index, value, reduce);
}
at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
return wrapper_CPU_scatter_out_value_reduce_out(self, dim, index, value, reduce, out);
}
at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
return wrapper_CPU_scatter_out_value_reduce_out(self, dim, index, value, reduce, out);
}
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
return wrapper_CPU_scatter__value_reduce(self, dim, index, value, reduce);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_scatter_add_functional final : public at::native::structured_scatter_add {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_add_functional op;
op.meta(self, dim, index, src);
op.impl(self, dim, index, src, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_scatter_add_out final : public at::native::structured_scatter_add {
    structured_scatter_add_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_add_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
structured_scatter_add_out op(out);
op.meta(self, dim, index, src);
op.impl(self, dim, index, src, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_scatter_add_inplace final : public at::native::structured_scatter_add {
    structured_scatter_add_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call is needed here (the base class has no set_output hook);
        // downstream retrieves the output via maybe_get_output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
structured_scatter_add_inplace op(self);
op.meta(self, dim, index, src);
op.impl(self, dim, index, src, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("scatter_add", TORCH_FN(wrapper_CPU_scatter_add));
m.impl("scatter_add.out", TORCH_FN(wrapper_CPU_scatter_add_out_out));
m.impl("scatter_add_", TORCH_FN(wrapper_CPU_scatter_add_));
}
} // anonymous namespace
namespace cpu {
at::Tensor scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CPU_scatter_add(self, dim, index, src);
}
at::Tensor & scatter_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CPU_scatter_add_out_out(self, dim, index, src, out);
}
at::Tensor & scatter_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
return wrapper_CPU_scatter_add_out_out(self, dim, index, src, out);
}
at::Tensor & scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CPU_scatter_add_(self, dim, index, src);
}
} // namespace cpu
} // namespace at
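// NOTE: an illustrative sketch. scatter_add always accumulates, so repeated
// indices sum into the same slot. Assuming <ATen/ATen.h>:
//
//   at::Tensor self = at::zeros({3});
//   at::Tensor idx  = at::tensor({0, 0, 2}, at::kLong);
//   at::Tensor src  = at::ones({3});
//   at::cpu::scatter_add_(self, 0, idx, src);   // self is now {2, 0, 1}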
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Scalar___lshift__(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__lshift__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Scalar___ilshift__(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__ilshift__(self, other);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("__lshift__.Scalar",
TORCH_FN(wrapper_CPU_Scalar___lshift__));
m.impl("__ilshift__.Scalar",
TORCH_FN(wrapper_CPU_Scalar___ilshift__));
}
} // anonymous namespace
namespace cpu {
at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_Scalar___lshift__(self, other);
}
at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_Scalar___ilshift__(self, other);
}
} // namespace cpu
} // namespace at
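// NOTE: an illustrative sketch. __lshift__ and __ilshift__ back the << and <<=
// operators on tensors; the Scalar overloads above serve tensor-by-scalar shifts:
//
//   at::Tensor t = at::tensor({1, 2, 4}, at::kInt);
//   at::Tensor shifted = at::cpu::__lshift__(t, 2);   // {4, 8, 16}
//   at::cpu::__ilshift__(t, 2);                       // shifts t in place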
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor___lshift__(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__lshift__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Tensor___ilshift__(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__ilshift__(self, other);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("__lshift__.Tensor",
TORCH_FN(wrapper_CPU_Tensor___lshift__));
m.impl("__ilshift__.Tensor",
TORCH_FN(wrapper_CPU_Tensor___ilshift__));
}
} // anonymous namespace
namespace cpu {
at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_Tensor___lshift__(self, other);
}
at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_Tensor___ilshift__(self, other);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_lt_Scalar_out_functional final : public at::native::structured_lt_Scalar_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_lt_Scalar(const at::Tensor & self, const at::Scalar & other) {
structured_lt_Scalar_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_lt_Scalar_out_out final : public at::native::structured_lt_Scalar_out {
    structured_lt_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_lt_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
structured_lt_Scalar_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_lt_Scalar_out_inplace final : public at::native::structured_lt_Scalar_out {
    structured_lt_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_lt__Scalar(at::Tensor & self, const at::Scalar & other) {
structured_lt_Scalar_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("lt.Scalar", TORCH_FN(wrapper_CPU_lt_Scalar));
m.impl("lt.Scalar_out", TORCH_FN(wrapper_CPU_lt_out_Scalar_out));
m.impl("lt_.Scalar", TORCH_FN(wrapper_CPU_lt__Scalar));
}
} // anonymous namespace
namespace cpu {
at::Tensor lt(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_lt_Scalar(self, other);
}
at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_lt_out_Scalar_out(self, other, out);
}
at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CPU_lt_out_Scalar_out(self, other, out);
}
at::Tensor & lt_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_lt__Scalar(self, other);
}
} // namespace cpu
} // namespace at
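// NOTE: an illustrative sketch. Unlike the scatter/index_add structs earlier in
// this file, the lt structs above do forward to the base class: lt is a
// TensorIterator-backed structured kernel, and the trailing call to
// at::native::structured_lt_Scalar_out::set_output_raw_strided is what registers
// the chosen output with the iterator. The functional form always yields a bool
// tensor:
//
//   at::Tensor mask = at::cpu::lt(t, 0.5);   // mask.scalar_type() == at::kBool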
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_lt_Tensor_out_functional final : public at::native::structured_lt_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_lt_Tensor(const at::Tensor & self, const at::Tensor & other) {
structured_lt_Tensor_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_lt_Tensor_out_out final : public at::native::structured_lt_Tensor_out {
    structured_lt_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_lt_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_lt_Tensor_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
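// Note (editorial): the out-variant differs from the functional one only in
// where the storage comes from. resize_out() reshapes the user-supplied `out`
// as needed; if `out` cannot be given the strides the kernel wants,
// maybe_create_proxy() allocates a correctly-strided temporary ("proxy"), the
// kernel writes into it via maybe_get_output(0), and the copy_ above moves the
// proxy's contents back into the user's tensor.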
struct structured_lt_Tensor_out_inplace final : public at::native::structured_lt_Tensor_out {
    structured_lt_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_lt__Tensor(at::Tensor & self, const at::Tensor & other) {
structured_lt_Tensor_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
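// Note (editorial): the in-place variant binds `self` as output slot 0.
// check_inplace() verifies that `self` already has the sizes and options the
// meta function computed (an in-place op may not reallocate), and the same
// proxy fallback as in the out-variant covers stride mismatches.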
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("lt.Tensor", TORCH_FN(wrapper_CPU_lt_Tensor));
m.impl("lt.Tensor_out", TORCH_FN(wrapper_CPU_lt_out_Tensor_out));
m.impl("lt_.Tensor", TORCH_FN(wrapper_CPU_lt__Tensor));
}
} // anonymous namespace
namespace cpu {
at::Tensor lt(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lt_Tensor(self, other);
}
at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lt_out_Tensor_out(self, other, out);
}
at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_lt_out_Tensor_out(self, other, out);
}
at::Tensor & lt_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lt__Tensor(self, other);
}
} // namespace cpu
} // namespace at
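// Usage sketch (illustrative only; assumes an ordinary ATen CPU build): the
// at::cpu:: entry points call the CPU kernels directly, bypassing the
// dispatcher. By convention `*_out` takes `out` first and `*_outf` takes it
// last:
//
//   at::Tensor a = at::randn({4});
//   at::Tensor b = at::randn({4});
//   at::Tensor r = at::cpu::lt(a, b);            // functional, bool result
//   at::Tensor out = at::empty({4}, at::kBool);
//   at::cpu::lt_out(out, a, b);                  // out-variant, out first
//   at::cpu::lt_outf(a, b, out);                 // same kernel, out last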
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__take(const at::Tensor & self, const at::Tensor & index) {
    // No device check
  // DeviceGuard omitted
  return at::native::take(self, index);
}
} // anonymous namespace
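// Note (editorial): unlike the structured wrappers above, unstructured ops such
// as take forward straight to the hand-written kernel in at::native. The
// "No device check / DeviceGuard omitted" comments record that this entry was
// generated with device checks disabled, and a device guard is a no-op on CPU
// anyway.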
namespace {
at::Tensor & wrapper_CPU_out_take_out(const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::take_out(self, index, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("take",
TORCH_FN(wrapper_CPU__take));
m.impl("take.out",
TORCH_FN(wrapper_CPU_out_take_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor take(const at::Tensor & self, const at::Tensor & index) {
return wrapper_CPU__take(self, index);
}
at::Tensor & take_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & index) {
return wrapper_CPU_out_take_out(self, index, out);
}
at::Tensor & take_outf(const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
return wrapper_CPU_out_take_out(self, index, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__nonzero_static(const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
    // No device check
  // DeviceGuard omitted
  return at::native::nonzero_static_cpu(self, size.guard_int(__FILE__, __LINE__), fill_value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_nonzero_static_out(const at::Tensor & self, c10::SymInt size, int64_t fill_value, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::nonzero_static_out_cpu(self, size.guard_int(__FILE__, __LINE__), fill_value, out);
}
} // anonymous namespace
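// Note (editorial): `size` arrives as a c10::SymInt so the op can be traced
// symbolically; guard_int(__FILE__, __LINE__) concretizes it to a plain
// int64_t for the eager CPU kernel, recording this source location as the
// origin of the guard for debugging.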
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("nonzero_static",
TORCH_FN(wrapper_CPU__nonzero_static));
m.impl("nonzero_static.out",
TORCH_FN(wrapper_CPU_out_nonzero_static_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor nonzero_static(const at::Tensor & self, int64_t size, int64_t fill_value) {
return wrapper_CPU__nonzero_static(self, size, fill_value);
}
at::Tensor nonzero_static_symint(const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
return wrapper_CPU__nonzero_static(self, size, fill_value);
}
at::Tensor & nonzero_static_out(at::Tensor & out, const at::Tensor & self, int64_t size, int64_t fill_value) {
return wrapper_CPU_out_nonzero_static_out(self, size, fill_value, out);
}
at::Tensor & nonzero_static_outf(const at::Tensor & self, int64_t size, int64_t fill_value, at::Tensor & out) {
return wrapper_CPU_out_nonzero_static_out(self, size, fill_value, out);
}
at::Tensor & nonzero_static_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
return wrapper_CPU_out_nonzero_static_out(self, size, fill_value, out);
}
at::Tensor & nonzero_static_symint_outf(const at::Tensor & self, c10::SymInt size, int64_t fill_value, at::Tensor & out) {
return wrapper_CPU_out_nonzero_static_out(self, size, fill_value, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_gather_out_functional final : public at::native::structured_gather_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gather_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gather_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
structured_gather_out_functional op;
op.meta(self, dim, index, sparse_grad);
op.impl(self, dim, index, sparse_grad, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_gather_out_out final : public at::native::structured_gather_out {
    structured_gather_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gather_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_gather_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_gather_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
structured_gather_out_out op(out);
op.meta(self, dim, index, sparse_grad);
op.impl(self, dim, index, sparse_grad, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("gather", TORCH_FN(wrapper_CPU_gather));
m.impl("gather.out", TORCH_FN(wrapper_CPU_gather_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CPU_gather(self, dim, index, sparse_grad);
}
at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CPU_gather_out_out(self, dim, index, sparse_grad, out);
}
at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
return wrapper_CPU_gather_out_out(self, dim, index, sparse_grad, out);
}
} // namespace cpu
} // namespace at
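// Usage sketch (illustrative only): gather selects values along `dim` using an
// integer index tensor of the same rank as `self`:
//
//   at::Tensor t = at::arange(6).reshape({2, 3});      // [[0,1,2],[3,4,5]]
//   std::vector<int64_t> iv = {2, 0};
//   at::Tensor idx = at::tensor(iv).reshape({1, 2});
//   at::Tensor g = at::cpu::gather(t, /*dim=*/1, idx, /*sparse_grad=*/false);
//   // g == [[2, 0]]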
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__cholesky(const at::Tensor & self, bool upper) {
    // No device check
  // DeviceGuard omitted
  return at::native::cholesky(self, upper);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_cholesky_out(const at::Tensor & self, bool upper, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::cholesky_out(self, upper, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("cholesky",
TORCH_FN(wrapper_CPU__cholesky));
m.impl("cholesky.out",
TORCH_FN(wrapper_CPU_out_cholesky_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor cholesky(const at::Tensor & self, bool upper) {
return wrapper_CPU__cholesky(self, upper);
}
at::Tensor & cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper) {
return wrapper_CPU_out_cholesky_out(self, upper, out);
}
at::Tensor & cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
return wrapper_CPU_out_cholesky_out(self, upper, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) {
    // No device check
  // DeviceGuard omitted
  return at::native::_cholesky_solve_helper_cpu(self, A, upper);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_cholesky_solve_helper",
TORCH_FN(wrapper_CPU___cholesky_solve_helper));
}
} // anonymous namespace
namespace cpu {
at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) {
return wrapper_CPU___cholesky_solve_helper(self, A, upper);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_polygamma_out_functional final : public at::native::structured_polygamma_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_polygamma(int64_t n, const at::Tensor & self) {
structured_polygamma_out_functional op;
op.meta(n, self);
op.impl(n, self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_polygamma_out_out final : public at::native::structured_polygamma_out {
    structured_polygamma_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_polygamma_out_out(int64_t n, const at::Tensor & self, at::Tensor & out) {
structured_polygamma_out_out op(out);
op.meta(n, self);
op.impl(n, self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("polygamma", TORCH_FN(wrapper_CPU_polygamma));
m.impl("polygamma.out", TORCH_FN(wrapper_CPU_polygamma_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor polygamma(int64_t n, const at::Tensor & self) {
return wrapper_CPU_polygamma(n, self);
}
at::Tensor & polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) {
return wrapper_CPU_polygamma_out_out(n, self, out);
}
at::Tensor & polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_polygamma_out_out(n, self, out);
}
} // namespace cpu
} // namespace at
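// Note (editorial): polygamma is unusual in that the derivative order `n`
// precedes the tensor argument, mirroring the `polygamma(int n, Tensor self)`
// schema; e.g. at::cpu::polygamma(1, x) computes the trigamma of x.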
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_igamma_out_functional final : public at::native::structured_igamma_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_igamma(const at::Tensor & self, const at::Tensor & other) {
structured_igamma_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_igamma_out_out final : public at::native::structured_igamma_out {
    structured_igamma_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_igamma_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_igamma_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
struct structured_igamma_out_inplace final : public at::native::structured_igamma_out {
    structured_igamma_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_igamma_(at::Tensor & self, const at::Tensor & other) {
structured_igamma_out_inplace op(self);
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return self;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("igamma", TORCH_FN(wrapper_CPU_igamma));
m.impl("igamma.out", TORCH_FN(wrapper_CPU_igamma_out_out));
m.impl("igamma_", TORCH_FN(wrapper_CPU_igamma_));
}
} // anonymous namespace
namespace cpu {
at::Tensor igamma(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igamma(self, other);
}
at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igamma_out_out(self, other, out);
}
at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_igamma_out_out(self, other, out);
}
at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igamma_(self, other);
}
} // namespace cpu
} // namespace at
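// Usage sketch (illustrative only): igamma computes the regularized lower
// incomplete gamma function P(self, other); the three entry points share one
// structured kernel:
//
//   at::Tensor a = at::rand({3}) + 0.5;
//   at::Tensor x = at::rand({3});
//   at::Tensor p = at::cpu::igamma(a, x);     // functional
//   at::Tensor out = at::empty_like(p);
//   at::cpu::igamma_out(out, a, x);           // writes into out
//   at::cpu::igamma_(a, x);                   // in-place on a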
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_fmin_out_functional final : public at::native::structured_fmin_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_fmin(const at::Tensor & self, const at::Tensor & other) {
structured_fmin_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_fmin_out_out final : public at::native::structured_fmin_out {
    structured_fmin_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_fmin_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_fmin_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fmin", TORCH_FN(wrapper_CPU_fmin));
m.impl("fmin.out", TORCH_FN(wrapper_CPU_fmin_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor fmin(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmin(self, other);
}
at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmin_out_out(self, other, out);
}
at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_fmin_out_out(self, other, out);
}
} // namespace cpu
} // namespace at
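// Note (editorial): fmin follows the IEEE/std::fmin convention: when exactly
// one of the paired elements is NaN, the non-NaN element wins, so it differs
// from the NaN-propagating at::minimum only in NaN handling.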
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__max(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::max(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_unary_out_max_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::max_unary_out(self, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("max",
TORCH_FN(wrapper_CPU__max));
m.impl("max.unary_out",
TORCH_FN(wrapper_CPU_unary_out_max_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor max(const at::Tensor & self) {
return wrapper_CPU__max(self);
}
at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_unary_out_max_out(self, out);
}
at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_unary_out_max_out(self, out);
}
} // namespace cpu
} // namespace at
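// Note (editorial): this is the argument-free reduction overload of max
// ("max.unary_out" in the schema), producing a 0-dim tensor holding the
// largest element; the dim/keepdim overloads have their own wrappers.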
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_fmax_out_functional final : public at::native::structured_fmax_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_fmax(const at::Tensor & self, const at::Tensor & other) {
structured_fmax_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_fmax_out_out final : public at::native::structured_fmax_out {
    structured_fmax_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_fmax_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_fmax_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("fmax", TORCH_FN(wrapper_CPU_fmax));
m.impl("fmax.out", TORCH_FN(wrapper_CPU_fmax_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor fmax(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmax(self, other);
}
at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmax_out_out(self, other, out);
}
at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_fmax_out_out(self, other, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_maximum_out_functional final : public at::native::structured_maximum_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_maximum(const at::Tensor & self, const at::Tensor & other) {
structured_maximum_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_maximum_out_out final : public at::native::structured_maximum_out {
    structured_maximum_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_maximum_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_maximum_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("maximum", TORCH_FN(wrapper_CPU_maximum));
m.impl("maximum.out", TORCH_FN(wrapper_CPU_maximum_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor maximum(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_maximum(self, other);
}
at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_maximum_out_out(self, other, out);
}
at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_maximum_out_out(self, other, out);
}
} // namespace cpu
} // namespace at
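// Note (editorial): maximum is the NaN-propagating counterpart of fmax above:
// if either paired element is NaN, maximum yields NaN, whereas fmax prefers
// the non-NaN operand (IEEE fmax semantics).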
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
void wrapper_CPU___amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
    // No device check
  // DeviceGuard omitted
  return at::native::_amp_foreach_non_finite_check_and_unscale_cpu_(self, found_inf, inv_scale);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_amp_foreach_non_finite_check_and_unscale_",
TORCH_FN(wrapper_CPU___amp_foreach_non_finite_check_and_unscale_));
}
} // anonymous namespace
namespace cpu {
void _amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
return wrapper_CPU___amp_foreach_non_finite_check_and_unscale_(self, found_inf, inv_scale);
}
} // namespace cpu
} // namespace at
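// Note (editorial): this void, in-place op is the gradient-unscaling step of
// automatic mixed precision: it multiplies every tensor in `self` by
// `inv_scale` and sets `found_inf` to a nonzero value if any element was
// inf/NaN, which lets the GradScaler skip the optimizer step that iteration.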
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Tensor_searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
    // No device check
  // DeviceGuard omitted
  return at::native::searchsorted_cpu(sorted_sequence, self, out_int32, right, side, sorter);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Tensor_out_searchsorted_out(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::searchsorted_out_cpu(sorted_sequence, self, out_int32, right, side, sorter, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("searchsorted.Tensor",
TORCH_FN(wrapper_CPU_Tensor_searchsorted));
m.impl("searchsorted.Tensor_out",
TORCH_FN(wrapper_CPU_Tensor_out_searchsorted_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
return wrapper_CPU_Tensor_searchsorted(sorted_sequence, self, out_int32, right, side, sorter);
}
at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
return wrapper_CPU_Tensor_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
return wrapper_CPU_Tensor_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU_Scalar_searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
    // No device check
  // DeviceGuard omitted
  return at::native::searchsorted_cpu(sorted_sequence, self, out_int32, right, side, sorter);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Scalar_out_searchsorted_out(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::searchsorted_out_cpu(sorted_sequence, self, out_int32, right, side, sorter, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("searchsorted.Scalar",
TORCH_FN(wrapper_CPU_Scalar_searchsorted));
m.impl("searchsorted.Scalar_out",
TORCH_FN(wrapper_CPU_Scalar_out_searchsorted_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
return wrapper_CPU_Scalar_searchsorted(sorted_sequence, self, out_int32, right, side, sorter);
}
at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
return wrapper_CPU_Scalar_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
return wrapper_CPU_Scalar_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
} // namespace cpu
} // namespace at
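// Usage sketch (illustrative only): searchsorted returns the insertion
// positions that keep `sorted_sequence` sorted; `right`/`side` choose the
// boundary and `out_int32` selects int32 instead of the default int64 indices:
//
//   at::Tensor seq = at::tensor(std::vector<double>{1.0, 3.0, 5.0});
//   at::Tensor v   = at::tensor(std::vector<double>{3.0});
//   at::Tensor i   = at::cpu::searchsorted(seq, v, /*out_int32=*/false,
//                                          /*right=*/false, ::std::nullopt,
//                                          ::std::nullopt);   // i == [1]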
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // No device check
  // DeviceGuard omitted
  return at::native::mse_loss_backward(grad_output, self, target, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_grad_input_mse_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::mse_loss_backward_out(grad_output, self, target, reduction, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("mse_loss_backward",
TORCH_FN(wrapper_CPU__mse_loss_backward));
m.impl("mse_loss_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_mse_loss_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU__mse_loss_backward(grad_output, self, target, reduction);
}
at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_grad_input_mse_loss_backward_out(grad_output, self, target, reduction, grad_input);
}
at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_mse_loss_backward_out(grad_output, self, target, reduction, grad_input);
}
} // namespace cpu
} // namespace at
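// Note (editorial): `reduction` is the integer encoding from at::Reduction
// (None = 0, Mean = 1, Sum = 2); the backward kernel scales the incoming
// gradient accordingly, e.g. dividing by numel() for Mean.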
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_grad_input_smooth_l1_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::smooth_l1_loss_backward_out(grad_output, self, target, reduction, beta, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("smooth_l1_loss_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_smooth_l1_loss_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
return wrapper_CPU_grad_input_smooth_l1_loss_backward_out(grad_output, self, target, reduction, beta, grad_input);
}
at::Tensor & smooth_l1_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_smooth_l1_loss_backward_out(grad_output, self, target, reduction, beta, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_huber_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::huber_loss_backward_out(grad_output, self, target, reduction, delta, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("huber_loss_backward.out",
TORCH_FN(wrapper_CPU_out_huber_loss_backward_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
return wrapper_CPU_out_huber_loss_backward_out(grad_output, self, target, reduction, delta, grad_input);
}
at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
return wrapper_CPU_out_huber_loss_backward_out(grad_output, self, target, reduction, delta, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_elu_backward_out_functional final : public at::native::structured_elu_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
structured_elu_backward_out_functional op;
op.meta(grad_output, alpha, scale, input_scale, is_result, self_or_result);
op.impl(grad_output, alpha, scale, input_scale, is_result, self_or_result, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_elu_backward_out_out final : public at::native::structured_elu_backward_out {
    structured_elu_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_elu_backward_out_grad_input(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
structured_elu_backward_out_out op(grad_input);
op.meta(grad_output, alpha, scale, input_scale, is_result, self_or_result);
op.impl(grad_output, alpha, scale, input_scale, is_result, self_or_result, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("elu_backward", TORCH_FN(wrapper_CPU_elu_backward));
m.impl("elu_backward.grad_input", TORCH_FN(wrapper_CPU_elu_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
return wrapper_CPU_elu_backward(grad_output, alpha, scale, input_scale, is_result, self_or_result);
}
at::Tensor & elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
return wrapper_CPU_elu_backward_out_grad_input(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}
at::Tensor & elu_backward_outf(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
return wrapper_CPU_elu_backward_out_grad_input(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}
} // namespace cpu
} // namespace at
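// Note (editorial): `is_result` records whether self_or_result holds the
// forward output rather than the forward input. ELU's gradient can be
// reconstructed from its output when alpha is non-negative (autograd can then
// free the input); the flag tells the kernel which formula to apply.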
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::glu_jvp(glu, x, dx, dim);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("glu_jvp",
TORCH_FN(wrapper_CPU__glu_jvp));
}
} // anonymous namespace
namespace cpu {
at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
return wrapper_CPU__glu_jvp(glu, x, dx, dim);
}
} // namespace cpu
} // namespace at
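// NOTE [Sketch: unstructured wrappers] -- illustrative note, not part of the
// generated output. glu_jvp is not a structured op, so its wrapper is a
// plain forwarder into at::native: there is no meta()/impl() split and, per
// the generated comments, no device check or DeviceGuard is emitted for it.
// Hypothetical direct call:
//
//   at::Tensor x = at::randn({2, 4});
//   at::Tensor dx = at::randn({2, 4});          // tangent of x
//   at::Tensor g = at::glu(x, /*dim=*/1);       // primal output, shape {2, 2}
//   at::Tensor dg = at::cpu::glu_jvp(g, x, dx, /*dim=*/1);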
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_hardsigmoid_backward_out_functional final : public at::native::structured_hardsigmoid_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) {
structured_hardsigmoid_backward_out_functional op;
op.meta(grad_output, self);
op.impl(grad_output, self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_hardsigmoid_backward_out_out final : public at::native::structured_hardsigmoid_backward_out {
    structured_hardsigmoid_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_hardsigmoid_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
structured_hardsigmoid_backward_out_out op(grad_input);
op.meta(grad_output, self);
op.impl(grad_output, self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("hardsigmoid_backward", TORCH_FN(wrapper_CPU_hardsigmoid_backward));
m.impl("hardsigmoid_backward.grad_input", TORCH_FN(wrapper_CPU_hardsigmoid_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU_hardsigmoid_backward(grad_output, self);
}
at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU_hardsigmoid_backward_out_grad_input(grad_output, self, grad_input);
}
at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
return wrapper_CPU_hardsigmoid_backward_out_grad_input(grad_output, self, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__log_sigmoid_forward(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid_forward_cpu(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_output_log_sigmoid_forward_out(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid_forward_out_cpu(self, output, buffer);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("log_sigmoid_forward",
TORCH_FN(wrapper_CPU__log_sigmoid_forward));
m.impl("log_sigmoid_forward.output",
TORCH_FN(wrapper_CPU_output_log_sigmoid_forward_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(const at::Tensor & self) {
return wrapper_CPU__log_sigmoid_forward(self);
}
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) {
return wrapper_CPU_output_log_sigmoid_forward_out(self, output, buffer);
}
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
return wrapper_CPU_output_log_sigmoid_forward_out(self, output, buffer);
}
} // namespace cpu
} // namespace at
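// NOTE [Sketch: _out vs _outf] -- illustrative note, not part of the
// generated output. Every out variant in the at::cpu namespace comes in two
// spellings that forward to the same wrapper: *_out takes the output tensors
// first, while *_outf keeps the schema order with outputs last. For example:
//
//   at::Tensor self = at::randn({3});
//   at::Tensor output = at::empty({3});
//   at::Tensor buffer = at::empty({3});
//   at::cpu::log_sigmoid_forward_out(output, buffer, self);
//   at::cpu::log_sigmoid_forward_outf(self, output, buffer);  // same call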
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor & wrapper_CPU_out_rrelu_with_noise_out(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::rrelu_with_noise_out_cpu(self, noise, lower, upper, training, generator, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__rrelu_with_noise_(at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::rrelu_with_noise_cpu_(self, noise, lower, upper, training, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__rrelu_with_noise(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::rrelu_with_noise_cpu(self, noise, lower, upper, training, generator);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("rrelu_with_noise.out",
TORCH_FN(wrapper_CPU_out_rrelu_with_noise_out));
m.impl("rrelu_with_noise_",
TORCH_FN(wrapper_CPU__rrelu_with_noise_));
m.impl("rrelu_with_noise",
TORCH_FN(wrapper_CPU__rrelu_with_noise));
}
} // anonymous namespace
namespace cpu {
at::Tensor & rrelu_with_noise_out(at::Tensor & out, const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
return wrapper_CPU_out_rrelu_with_noise_out(self, noise, lower, upper, training, generator, out);
}
at::Tensor & rrelu_with_noise_outf(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_out_rrelu_with_noise_out(self, noise, lower, upper, training, generator, out);
}
at::Tensor & rrelu_with_noise_(at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
return wrapper_CPU__rrelu_with_noise_(self, noise, lower, upper, training, generator);
}
at::Tensor rrelu_with_noise(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
return wrapper_CPU__rrelu_with_noise(self, noise, lower, upper, training, generator);
}
} // namespace cpu
} // namespace at
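// NOTE [rrelu_with_noise mutates noise] -- illustrative note, not part of
// the generated output. Unusually, noise is a mutable reference in all three
// variants above: in training mode the CPU kernel stores the sampled
// per-element slopes into it (so backward can reuse them), while in eval
// mode a fixed slope of (lower + upper) / 2 is used instead. Training-mode
// sketch:
//
//   at::Tensor self = at::randn({5});
//   at::Tensor noise = at::empty({5});
//   at::Tensor out = at::cpu::rrelu_with_noise(
//       self, noise, /*lower=*/0.125, /*upper=*/0.333, /*training=*/true,
//       /*generator=*/std::nullopt);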
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_softplus_backward_out_functional final : public at::native::structured_softplus_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
structured_softplus_backward_out_functional op;
op.meta(grad_output, self, beta, threshold);
op.impl(grad_output, self, beta, threshold, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_softplus_backward_out_out final : public at::native::structured_softplus_backward_out {
    structured_softplus_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_softplus_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
structured_softplus_backward_out_out op(grad_input);
op.meta(grad_output, self, beta, threshold);
op.impl(grad_output, self, beta, threshold, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("softplus_backward", TORCH_FN(wrapper_CPU_softplus_backward));
m.impl("softplus_backward.grad_input", TORCH_FN(wrapper_CPU_softplus_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus_backward(grad_output, self, beta, threshold);
}
at::Tensor & softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus_backward_out_grad_input(grad_output, self, beta, threshold, grad_input);
}
at::Tensor & softplus_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_CPU_softplus_backward_out_grad_input(grad_output, self, beta, threshold, grad_input);
}
} // namespace cpu
} // namespace at
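// NOTE [Why TORCH_FN] -- illustrative note, not part of the generated
// output. The registrations above pass each wrapper through TORCH_FN, which
// wraps the function in a c10::CompileTimeFunctionPointer. Carrying the
// pointer in the type lets the dispatcher build an unboxed calling path for
// the kernel rather than routing arguments through the boxed stack. A
// hypothetical registration without it would still work, just less
// efficiently:
//
//   TORCH_LIBRARY_IMPL(aten, CPU, m) {
//     m.impl("softplus_backward", TORCH_FN(wrapper_CPU_softplus_backward));
//     // vs. m.impl("softplus_backward", &wrapper_CPU_softplus_backward);
//   }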
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_adaptive_max_pool2d_out_cpu_functional final : public at::native::structured_adaptive_max_pool2d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
structured_adaptive_max_pool2d_out_cpu_functional op;
op.meta(self, output_size);
op.impl(self, output_size, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
struct structured_adaptive_max_pool2d_out_cpu_out final : public at::native::structured_adaptive_max_pool2d_out_cpu {
    structured_adaptive_max_pool2d_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_adaptive_max_pool2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
structured_adaptive_max_pool2d_out_cpu_out op(out, indices);
op.meta(self, output_size);
op.impl(self, output_size, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(out, indices);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("adaptive_max_pool2d", TORCH_FN(wrapper_CPU_adaptive_max_pool2d));
m.impl("adaptive_max_pool2d.out", TORCH_FN(wrapper_CPU_adaptive_max_pool2d_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool2d(self, output_size);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool2d_out_out(self, output_size, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool2d_out_out(self, output_size, out, indices);
}
} // namespace cpu
} // namespace at
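// NOTE [Two-output structured ops] -- illustrative note, not part of the
// generated output. adaptive_max_pool2d's structured kernel produces two
// tensors (values plus int64 indices), so outputs_ and proxy_outputs_ grow
// to 2-element arrays and the wrappers return tuples, copying back each
// proxy independently. Functional-variant sketch:
//
//   at::Tensor self = at::randn({1, 3, 8, 8});
//   auto [values, indices] = at::cpu::adaptive_max_pool2d(self, {4, 4});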
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_adaptive_max_pool3d_out_cpu_functional final : public at::native::structured_adaptive_max_pool3d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
structured_adaptive_max_pool3d_out_cpu_functional op;
op.meta(self, output_size);
op.impl(self, output_size, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
struct structured_adaptive_max_pool3d_out_cpu_out final : public at::native::structured_adaptive_max_pool3d_out_cpu {
    structured_adaptive_max_pool3d_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_adaptive_max_pool3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
structured_adaptive_max_pool3d_out_cpu_out op(out, indices);
op.meta(self, output_size);
op.impl(self, output_size, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(out, indices);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("adaptive_max_pool3d", TORCH_FN(wrapper_CPU_adaptive_max_pool3d));
m.impl("adaptive_max_pool3d.out", TORCH_FN(wrapper_CPU_adaptive_max_pool3d_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool3d(self, output_size);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool3d_out_out(self, output_size, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool3d_out_out(self, output_size, out, indices);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_avg_pool2d_backward_out_cpu_functional final : public at::native::structured_avg_pool2d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
structured_avg_pool2d_backward_out_cpu_functional op;
op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_avg_pool2d_backward_out_cpu_out final : public at::native::structured_avg_pool2d_backward_out_cpu {
    structured_avg_pool2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_avg_pool2d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
structured_avg_pool2d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("avg_pool2d_backward", TORCH_FN(wrapper_CPU_avg_pool2d_backward));
m.impl("avg_pool2d_backward.grad_input", TORCH_FN(wrapper_CPU_avg_pool2d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
at::Tensor & avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
return wrapper_CPU_avg_pool2d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
} // namespace cpu
} // namespace at
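// NOTE [divisor_override] -- illustrative note, not part of the generated
// output. The trailing ::std::optional<int64_t> lets callers replace the
// averaging divisor; std::nullopt means "divide by the (possibly
// padding-adjusted) kernel area", which is the common case. Sketch:
//
//   at::Tensor x = at::randn({1, 1, 4, 4});
//   at::Tensor y = at::avg_pool2d(x, {2, 2});
//   at::Tensor gx = at::cpu::avg_pool2d_backward(
//       at::ones_like(y), x, /*kernel_size=*/{2, 2}, /*stride=*/{2, 2},
//       /*padding=*/{0, 0}, /*ceil_mode=*/false, /*count_include_pad=*/true,
//       /*divisor_override=*/std::nullopt);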
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_max_pool2d_with_indices_out_cpu_functional final : public at::native::structured_max_pool2d_with_indices_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
structured_max_pool2d_with_indices_out_cpu_functional op;
op.meta(self, kernel_size, stride, padding, dilation, ceil_mode);
op.impl(self, kernel_size, stride, padding, dilation, ceil_mode, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
struct structured_max_pool2d_with_indices_out_cpu_out final : public at::native::structured_max_pool2d_with_indices_out_cpu {
    structured_max_pool2d_with_indices_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_max_pool2d_with_indices_out_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
structured_max_pool2d_with_indices_out_cpu_out op(out, indices);
op.meta(self, kernel_size, stride, padding, dilation, ceil_mode);
op.impl(self, kernel_size, stride, padding, dilation, ceil_mode, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(out, indices);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("max_pool2d_with_indices", TORCH_FN(wrapper_CPU_max_pool2d_with_indices));
m.impl("max_pool2d_with_indices.out", TORCH_FN(wrapper_CPU_max_pool2d_with_indices_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU_max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU_max_pool2d_with_indices_out_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_max_pool2d_with_indices_out_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
} // namespace cpu
} // namespace at
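// NOTE [Pooling indices round trip] -- illustrative note, not part of the
// generated output. The int64 indices tensor records the flattened position
// of each selected maximum within its spatial plane, and can be fed to
// at::max_unpool2d to scatter values back into the original layout:
//
//   at::Tensor x = at::randn({1, 1, 4, 4});
//   auto [pooled, idx] = at::cpu::max_pool2d_with_indices(
//       x, /*kernel_size=*/{2, 2}, /*stride=*/{2, 2}, /*padding=*/{0, 0},
//       /*dilation=*/{1, 1}, /*ceil_mode=*/false);
//   at::Tensor unpooled = at::max_unpool2d(pooled, idx, /*output_size=*/{4, 4});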
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_linear1d_out_cpu_functional final : public at::native::structured_upsample_linear1d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
structured_upsample_linear1d_out_cpu_functional op;
op.meta(self, output_size, align_corners, scales);
op.impl(self, output_size, align_corners, scales, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_linear1d_out_cpu_out final : public at::native::structured_upsample_linear1d_out_cpu {
    structured_upsample_linear1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_linear1d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales, at::Tensor & out) {
structured_upsample_linear1d_out_cpu_out op(out);
op.meta(self, output_size, align_corners, scales);
op.impl(self, output_size, align_corners, scales, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_linear1d", TORCH_FN(wrapper_CPU_upsample_linear1d));
m.impl("upsample_linear1d.out", TORCH_FN(wrapper_CPU_upsample_linear1d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d(self, output_size, align_corners, scales);
}
at::Tensor upsample_linear1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales);
}
at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_out_out(self, output_size, align_corners, scales, out);
}
at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_linear1d_out_out(self, output_size, align_corners, scales, out);
}
at::Tensor & upsample_linear1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales, out);
}
at::Tensor & upsample_linear1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_linear1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales, out);
}
} // namespace cpu
} // namespace at
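// NOTE [SymInt entry points] -- illustrative note, not part of the generated
// output. The *_symint variants accept symbolic shapes (c10::SymInt); since
// this CPU kernel needs concrete sizes, C10_AS_INTARRAYREF_SLOW converts the
// SymIntArrayRef to a plain IntArrayRef, erroring out if any element is
// still genuinely symbolic. Hypothetical concrete-shape call:
//
//   at::Tensor self = at::randn({1, 2, 8});
//   std::vector<c10::SymInt> out_size{c10::SymInt(16)};
//   at::Tensor up = at::cpu::upsample_linear1d_symint(
//       self, c10::SymIntArrayRef(out_size), /*align_corners=*/false,
//       /*scales=*/std::nullopt);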
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_linear1d_backward_out_cpu_functional final : public at::native::structured_upsample_linear1d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
structured_upsample_linear1d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, align_corners, scales);
op.impl(grad_output, output_size, input_size, align_corners, scales, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_linear1d_backward_out_cpu_out final : public at::native::structured_upsample_linear1d_backward_out_cpu {
    structured_upsample_linear1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_linear1d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
structured_upsample_linear1d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, align_corners, scales);
op.impl(grad_output, output_size, input_size, align_corners, scales, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_linear1d_backward", TORCH_FN(wrapper_CPU_upsample_linear1d_backward));
m.impl("upsample_linear1d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_linear1d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward(grad_output, output_size, input_size, align_corners, scales);
}
at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales);
}
at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales, grad_input);
}
at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales, grad_input);
}
at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales, grad_input);
}
at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales, grad_input);
}
} // namespace cpu
} // namespace at
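// NOTE [Why backward takes input_size] -- illustrative note, not part of the
// generated output. grad_output only determines output_size, so the backward
// schema also carries the full input_size; meta() uses it to shape
// grad_input. Sketch, upsampling {1, 2, 8} -> {1, 2, 16} and back:
//
//   at::Tensor go = at::randn({1, 2, 16});
//   at::Tensor gi = at::cpu::upsample_linear1d_backward(
//       go, /*output_size=*/{16}, /*input_size=*/{1, 2, 8},
//       /*align_corners=*/false, /*scales=*/std::nullopt);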
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_bicubic2d_out_cpu_functional final : public at::native::structured_upsample_bicubic2d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_bicubic2d_out_cpu_functional op;
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_bicubic2d_out_cpu_out final : public at::native::structured_upsample_bicubic2d_out_cpu {
    structured_upsample_bicubic2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_bicubic2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
structured_upsample_bicubic2d_out_cpu_out op(out);
op.meta(self, output_size, align_corners, scales_h, scales_w);
op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_bicubic2d", TORCH_FN(wrapper_CPU_upsample_bicubic2d));
m.impl("upsample_bicubic2d.out", TORCH_FN(wrapper_CPU_upsample_bicubic2d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bicubic2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_upsample_bicubic2d_backward_out_cpu_functional final : public at::native::structured_upsample_bicubic2d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // no super call here: unlike the TensorIterator-backed ops earlier in
        // this file, this meta class derives directly from MetaBase, which has
        // no meaningful set_output to forward to
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_bicubic2d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
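// Editor's note (sketch of the structured-kernel protocol used by the
// functional wrappers in this file): `meta` performs argument checking and
// shape/dtype inference and calls set_output_strided /
// set_output_raw_strided, which for the *_functional structs allocates a
// fresh tensor through create_out and propagates any dimension names; `impl`
// then runs the CPU kernel into outputs_[0]. Because the struct owns its
// output, the wrapper can hand it back with std::move.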
struct structured_upsample_bicubic2d_backward_out_cpu_out final : public at::native::structured_upsample_bicubic2d_backward_out_cpu {
    structured_upsample_bicubic2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
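// Editor's note on proxy_outputs_ (hedged description of the pattern above):
// resize_out reshapes the caller's tensor, but it cannot always give it the
// exact strides the kernel requested. When maybe_create_proxy detects such a
// mismatch it returns a correctly-strided temporary; the kernel then writes
// into that proxy (via maybe_get_output), and the calling wrapper finishes
// with outputs_[0].get().copy_(*proxy_outputs_[0]) so the user-supplied
// grad_input still receives the result.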
at::Tensor & wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
structured_upsample_bicubic2d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_bicubic2d_backward", TORCH_FN(wrapper_CPU_upsample_bicubic2d_backward));
    m.impl("upsample_bicubic2d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_bicubic2d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bicubic2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bicubic2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured__upsample_bicubic2d_aa_backward_out_cpu_functional final : public at::native::structured__upsample_bicubic2d_aa_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured__upsample_bicubic2d_aa_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured__upsample_bicubic2d_aa_backward_out_cpu_out final : public at::native::structured__upsample_bicubic2d_aa_backward_out_cpu {
    structured__upsample_bicubic2d_aa_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
structured__upsample_bicubic2d_aa_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_upsample_bicubic2d_aa_backward", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa_backward));
    m.impl("_upsample_bicubic2d_aa_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_upsample_nearest1d_out_cpu_functional final : public at::native::structured_upsample_nearest1d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales) {
structured_upsample_nearest1d_out_cpu_functional op;
op.meta(self, output_size, scales);
op.impl(self, output_size, scales, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_nearest1d_out_cpu_out final : public at::native::structured_upsample_nearest1d_out_cpu {
    structured_upsample_nearest1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_nearest1d_out_out(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
structured_upsample_nearest1d_out_cpu_out op(out);
op.meta(self, output_size, scales);
op.impl(self, output_size, scales, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_nearest1d", TORCH_FN(wrapper_CPU_upsample_nearest1d));
    m.impl("upsample_nearest1d.out", TORCH_FN(wrapper_CPU_upsample_nearest1d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d(self, output_size, scales);
}
at::Tensor upsample_nearest1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales);
}
at::Tensor & upsample_nearest1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_out_out(self, output_size, scales, out);
}
at::Tensor & upsample_nearest1d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_nearest1d_out_out(self, output_size, scales, out);
}
at::Tensor & upsample_nearest1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
at::Tensor & upsample_nearest1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_nearest1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured__upsample_nearest_exact1d_out_cpu_functional final : public at::native::structured__upsample_nearest_exact1d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU__upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales) {
structured__upsample_nearest_exact1d_out_cpu_functional op;
op.meta(self, output_size, scales);
op.impl(self, output_size, scales, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured__upsample_nearest_exact1d_out_cpu_out final : public at::native::structured__upsample_nearest_exact1d_out_cpu {
    structured__upsample_nearest_exact1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__upsample_nearest_exact1d_out_out(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
structured__upsample_nearest_exact1d_out_cpu_out op(out);
op.meta(self, output_size, scales);
op.impl(self, output_size, scales, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_upsample_nearest_exact1d", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d));
    m.impl("_upsample_nearest_exact1d.out", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d(self, output_size, scales);
}
at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales);
}
at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, output_size, scales, out);
}
at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, output_size, scales, out);
}
at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_upsample_nearest1d_backward_out_cpu_functional final : public at::native::structured_upsample_nearest1d_backward_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales) {
structured_upsample_nearest1d_backward_out_cpu_functional op;
op.meta(grad_output, output_size, input_size, scales);
op.impl(grad_output, output_size, input_size, scales, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_nearest1d_backward_out_cpu_out final : public at::native::structured_upsample_nearest1d_backward_out_cpu {
    structured_upsample_nearest1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_nearest1d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
structured_upsample_nearest1d_backward_out_cpu_out op(grad_input);
op.meta(grad_output, output_size, input_size, scales);
op.impl(grad_output, output_size, input_size, scales, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return grad_input;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_nearest1d_backward", TORCH_FN(wrapper_CPU_upsample_nearest1d_backward));
    m.impl("upsample_nearest1d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_nearest1d_backward_out_grad_input));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward(grad_output, output_size, input_size, scales);
}
at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales);
}
at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & upsample_nearest1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
at::Tensor & upsample_nearest1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_upsample_nearest3d_out_cpu_functional final : public at::native::structured_upsample_nearest3d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
structured_upsample_nearest3d_out_cpu_functional op;
op.meta(self, output_size, scales_d, scales_h, scales_w);
op.impl(self, output_size, scales_d, scales_h, scales_w, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_upsample_nearest3d_out_cpu_out final : public at::native::structured_upsample_nearest3d_out_cpu {
    structured_upsample_nearest3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_nearest3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
structured_upsample_nearest3d_out_cpu_out op(out);
op.meta(self, output_size, scales_d, scales_h, scales_w);
op.impl(self, output_size, scales_d, scales_h, scales_w, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("upsample_nearest3d", TORCH_FN(wrapper_CPU_upsample_nearest3d));
    m.impl("upsample_nearest3d.out", TORCH_FN(wrapper_CPU_upsample_nearest3d_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d(self, output_size, scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
at::Tensor & upsample_nearest3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest3d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_nearest3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_nearest3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU__slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv_transpose3d_cpu(self, weight, C10_AS_INTARRAYREF_SLOW(kernel_size), bias, C10_AS_INTARRAYREF_SLOW(stride), C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), C10_AS_INTARRAYREF_SLOW(dilation));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_slow_conv_transpose3d_out(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::slow_conv_transpose3d_out_cpu(self, weight, C10_AS_INTARRAYREF_SLOW(kernel_size), bias, C10_AS_INTARRAYREF_SLOW(stride), C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), C10_AS_INTARRAYREF_SLOW(dilation), out);
}
} // anonymous namespace
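// Editor's note: slow_conv_transpose3d is an unstructured kernel, so the two
// wrappers above skip the meta/impl machinery and forward straight to the
// handwritten at::native implementations. The SymInt size lists are first
// collapsed to concrete integers; C10_AS_INTARRAYREF_SLOW materializes every
// SymInt as a real int64_t (hence the "slow" in the name). The "No device
// check" / "DeviceGuard omitted" comments record that codegen decided this
// CPU-only entry needs neither multi-device validation nor a device guard.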
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("slow_conv_transpose3d", TORCH_FN(wrapper_CPU__slow_conv_transpose3d));
    m.impl("slow_conv_transpose3d.out", TORCH_FN(wrapper_CPU_out_slow_conv_transpose3d_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_transpose3d(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation));
}
at::Tensor slow_conv_transpose3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
return wrapper_CPU__slow_conv_transpose3d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
at::Tensor & slow_conv_transpose3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out);
}
at::Tensor & slow_conv_transpose3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out);
}
at::Tensor & slow_conv_transpose3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}
at::Tensor & slow_conv_transpose3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}
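// Editor's note: the non-symint entry points above go the other direction:
// c10::fromIntArrayRefSlow lifts plain int64_t sizes into SymIntArrayRef so
// one SymInt-typed wrapper serves both call styles. A hedged usage sketch
// (illustrative shapes only):
//
//   at::Tensor in = at::rand({1, 4, 5, 5, 5});
//   at::Tensor w  = at::rand({4, 2, 3, 3, 3});
//   at::Tensor y  = at::cpu::slow_conv_transpose3d(
//       in, w, /*kernel_size=*/{3, 3, 3}, /*bias=*/{},
//       /*stride=*/{1, 1, 1}, /*padding=*/{0, 0, 0},
//       /*output_padding=*/{0, 0, 0}, /*dilation=*/{1, 1, 1});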
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_isposinf_out_functional final : public at::native::structured_isposinf_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
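// Editor's note (hedged): unlike the upsample structs earlier in this file,
// these overrides end with an explicit call to
// at::native::structured_isposinf_out::set_output_raw_strided. For
// TensorIterator-backed structured kernels, that base-class hook binds the
// freshly created or resized output into the iterator, so it must run after
// the output exists, which is what the recurring "super must happen after"
// comment refers to. Where no such call appears, the base class has no
// set_output override to invoke and the comment is vestigial.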
at::Tensor wrapper_CPU_isposinf(const at::Tensor & self) {
structured_isposinf_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_isposinf_out_out final : public at::native::structured_isposinf_out {
    structured_isposinf_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_isposinf_out_out(const at::Tensor & self, at::Tensor & out) {
structured_isposinf_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("isposinf", TORCH_FN(wrapper_CPU_isposinf));
    m.impl("isposinf.out", TORCH_FN(wrapper_CPU_isposinf_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor isposinf(const at::Tensor & self) {
return wrapper_CPU_isposinf(self);
}
at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_isposinf_out_out(self, out);
}
at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_isposinf_out_out(self, out);
}
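// Usage sketch (editor's addition, illustrative only): the at::cpu:: entry
// points bypass the dispatcher entirely and are useful when the caller
// already knows its tensors live on CPU:
//
//   at::Tensor t = at::full({3}, std::numeric_limits<float>::infinity());
//   at::Tensor m = at::cpu::isposinf(t);  // calls wrapper_CPU_isposinf directly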
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_special_entr_out_functional final : public at::native::structured_special_entr_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_entr(const at::Tensor & self) {
structured_special_entr_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_entr_out_out final : public at::native::structured_special_entr_out {
    structured_special_entr_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_entr_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_entr_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_entr", TORCH_FN(wrapper_CPU_special_entr));
    m.impl("special_entr.out", TORCH_FN(wrapper_CPU_special_entr_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_entr(const at::Tensor & self) {
return wrapper_CPU_special_entr(self);
}
at::Tensor & special_entr_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_entr_out_out(self, out);
}
at::Tensor & special_entr_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_entr_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_special_zeta_out_functional final : public at::native::structured_special_zeta_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_zeta(const at::Tensor & self, const at::Tensor & other) {
structured_special_zeta_out_functional op;
op.meta(self, other);
op.impl(self, other, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_zeta_out_out final : public at::native::structured_special_zeta_out {
    structured_special_zeta_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_zeta_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
structured_special_zeta_out_out op(out);
op.meta(self, other);
op.impl(self, other, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_zeta", TORCH_FN(wrapper_CPU_special_zeta));
    m.impl("special_zeta.out", TORCH_FN(wrapper_CPU_special_zeta_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_zeta(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_zeta(self, other);
}
at::Tensor & special_zeta_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_zeta_out_out(self, other, out);
}
at::Tensor & special_zeta_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_special_zeta_out_out(self, other, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// the at namespace already.
namespace {
struct structured_special_i0e_out_functional final : public at::native::structured_special_i0e_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_i0e(const at::Tensor & self) {
structured_special_i0e_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
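// [Editor's note] Flow of the functional wrapper above: op.meta() computes the
// output metadata and allocates outputs_[0] via create_out, op.impl() fills it
// in place, and the tensor is moved out of the op, so no extra copy occurs.
// Equivalent public call:
//
//   at::Tensor y = at::special_i0e(x);  // dispatches here for CPU x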
struct structured_special_i0e_out_out final : public at::native::structured_special_i0e_out {
    structured_special_i0e_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_i0e_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_i0e_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_i0e", TORCH_FN(wrapper_CPU_special_i0e));
m.impl("special_i0e.out", TORCH_FN(wrapper_CPU_special_i0e_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_i0e(const at::Tensor & self) {
return wrapper_CPU_special_i0e(self);
}
at::Tensor & special_i0e_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_i0e_out_out(self, out);
}
at::Tensor & special_i0e_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_i0e_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_linalg_lu_out_functional final : public at::native::structured_linalg_lu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 3> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_linalg_lu(const at::Tensor & A, bool pivot) {
structured_linalg_lu_out_functional op;
op.meta(A, pivot);
op.impl(A, pivot, op.outputs_[0], op.outputs_[1], op.outputs_[2]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]));
}
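// [Editor's sketch] Usage of the functional variant above; with pivot=true the
// factors satisfy A = P @ L @ U:
//
//   at::Tensor A = at::rand({4, 4});
//   auto [P, L, U] = at::linalg_lu(A, /*pivot=*/true);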
struct structured_linalg_lu_out_out final : public at::native::structured_linalg_lu_out {
    structured_linalg_lu_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 3> outputs_;
    std::array<::std::optional<Tensor>, 3> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_linalg_lu_out_out(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
structured_linalg_lu_out_out op(P, L, U);
op.meta(A, pivot);
op.impl(A, pivot, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
return std::forward_as_tuple(P, L, U);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_lu", TORCH_FN(wrapper_CPU_linalg_lu));
m.impl("linalg_lu.out", TORCH_FN(wrapper_CPU_linalg_lu_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot) {
return wrapper_CPU_linalg_lu(A, pivot);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot) {
return wrapper_CPU_linalg_lu_out_out(A, pivot, P, L, U);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
return wrapper_CPU_linalg_lu_out_out(A, pivot, P, L, U);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__linalg_det_out_functional final : public at::native::structured__linalg_det_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 3> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_det(const at::Tensor & A) {
structured__linalg_det_out_functional op;
op.meta(A);
op.impl(A, op.outputs_[0], op.outputs_[1], op.outputs_[2]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]));
}
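// [Editor's note] _linalg_det is the composite behind torch.linalg.det: it
// returns the determinant together with the LU factorization and pivots used
// to compute it (which autograd can then reuse):
//
//   at::Tensor A = at::rand({3, 3});
//   auto [det, LU, pivots] = at::_linalg_det(A);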
struct structured__linalg_det_out_out final : public at::native::structured__linalg_det_out {
    structured__linalg_det_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 3> outputs_;
    std::array<::std::optional<Tensor>, 3> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_det_out_result(const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
structured__linalg_det_out_out op(result, LU, pivots);
op.meta(A);
op.impl(A, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
return std::forward_as_tuple(result, LU, pivots);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_linalg_det", TORCH_FN(wrapper_CPU__linalg_det));
m.impl("_linalg_det.result", TORCH_FN(wrapper_CPU__linalg_det_out_result));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det(const at::Tensor & A) {
return wrapper_CPU__linalg_det(A);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_out(at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
return wrapper_CPU__linalg_det_out_result(A, result, LU, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_outf(const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
return wrapper_CPU__linalg_det_out_result(A, result, LU, pivots);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__linalg_eig(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eig(self);
}
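// [Editor's note] linalg_eig is not a structured kernel, so its wrappers here
// are plain forwarders to at::native::linalg_eig with no meta()/impl() split
// and no proxy-output handling. The results are complex tensors:
//
//   at::Tensor A = at::rand({3, 3});
//   auto [w, V] = at::linalg_eig(A);  // complex eigenvalues / eigenvectors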
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_out_linalg_eig_out(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eig_out(self, eigenvalues, eigenvectors);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_eig",
TORCH_FN(wrapper_CPU__linalg_eig));
m.impl("linalg_eig.out",
TORCH_FN(wrapper_CPU_out_linalg_eig_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self) {
return wrapper_CPU__linalg_eig(self);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self) {
return wrapper_CPU_out_linalg_eig_out(self, eigenvalues, eigenvectors);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
return wrapper_CPU_out_linalg_eig_out(self, eigenvalues, eigenvectors);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___linalg_eigvals(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_linalg_eigvals(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_linalg_eigvals",
TORCH_FN(wrapper_CPU___linalg_eigvals));
}
} // anonymous namespace
namespace cpu {
at::Tensor _linalg_eigvals(const at::Tensor & self) {
return wrapper_CPU___linalg_eigvals(self);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured__linalg_svd_out_functional final : public at::native::structured__linalg_svd_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 3> outputs_;
};
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_svd(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver) {
structured__linalg_svd_out_functional op;
op.meta(A, full_matrices, compute_uv, driver);
op.impl(A, full_matrices, compute_uv, driver, op.outputs_[0], op.outputs_[1], op.outputs_[2]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]), std::move(op.outputs_[2]));
}
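// [Editor's sketch] With full_matrices=false the reduced SVD is returned, so
// A ~= U @ diag(S) @ Vh. The optional `driver` selects a backend-specific
// solver and can usually be left unset on CPU:
//
//   at::Tensor A = at::rand({5, 3});
//   auto [U, S, Vh] = at::_linalg_svd(A, /*full_matrices=*/false,
//                                     /*compute_uv=*/true, ::std::nullopt);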
struct structured__linalg_svd_out_out final : public at::native::structured__linalg_svd_out {
    structured__linalg_svd_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 3> outputs_;
    std::array<::std::optional<Tensor>, 3> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_svd_out_U(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
structured__linalg_svd_out_out op(U, S, Vh);
op.meta(A, full_matrices, compute_uv, driver);
op.impl(A, full_matrices, compute_uv, driver, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(*op.proxy_outputs_[2]);
return std::forward_as_tuple(U, S, Vh);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_linalg_svd", TORCH_FN(wrapper_CPU__linalg_svd));
m.impl("_linalg_svd.U", TORCH_FN(wrapper_CPU__linalg_svd_out_U));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver) {
return wrapper_CPU__linalg_svd(A, full_matrices, compute_uv, driver);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver) {
return wrapper_CPU__linalg_svd_out_U(A, full_matrices, compute_uv, driver, U, S, Vh);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
return wrapper_CPU__linalg_svd_out_U(A, full_matrices, compute_uv, driver, U, S, Vh);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_linalg_qr_out_functional final : public at::native::structured_linalg_qr_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 2> outputs_;
};
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_linalg_qr(const at::Tensor & A, c10::string_view mode) {
structured_linalg_qr_out_functional op;
op.meta(A, mode);
op.impl(A, mode, op.outputs_[0], op.outputs_[1]);
return std::make_tuple(std::move(op.outputs_[0]), std::move(op.outputs_[1]));
}
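// [Editor's sketch] `mode` is one of "reduced" (the default), "complete", or
// "r":
//
//   at::Tensor A = at::rand({5, 3});
//   auto [Q, R] = at::linalg_qr(A, "reduced");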
struct structured_linalg_qr_out_out final : public at::native::structured_linalg_qr_out {
    structured_linalg_qr_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // (no super call here: unlike the TensorIterator-backed kernels, this
        // op's structured base has no set_output_raw_strided to forward to)
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 2> outputs_;
    std::array<::std::optional<Tensor>, 2> proxy_outputs_;
};
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_linalg_qr_out_out(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
structured_linalg_qr_out_out op(Q, R);
op.meta(A, mode);
op.impl(A, mode, op.maybe_get_output(0), op.maybe_get_output(1));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(*op.proxy_outputs_[1]);
return std::forward_as_tuple(Q, R);
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("linalg_qr", TORCH_FN(wrapper_CPU_linalg_qr));
m.impl("linalg_qr.out", TORCH_FN(wrapper_CPU_linalg_qr_out_out));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor> linalg_qr(const at::Tensor & A, c10::string_view mode) {
return wrapper_CPU_linalg_qr(A, mode);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode) {
return wrapper_CPU_linalg_qr_out_out(A, mode, Q, R);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
return wrapper_CPU_linalg_qr_out_out(A, mode, Q, R);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_optional_intlist(values, addends);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_test_optional_intlist",
TORCH_FN(wrapper_CPU___test_optional_intlist));
}
} // anonymous namespace
namespace cpu {
at::Tensor _test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
return wrapper_CPU___test_optional_intlist(values, addends);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_CPU___jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value) {
  // No device check
  // DeviceGuard omitted
  return at::native::_jagged_to_padded_dense_forward_cpu(values, offsets, C10_AS_INTARRAYREF_SLOW(max_lengths), padding_value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_jagged_to_padded_dense_forward",
TORCH_FN(wrapper_CPU___jagged_to_padded_dense_forward));
}
} // anonymous namespace
namespace cpu {
at::Tensor _jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, at::IntArrayRef max_lengths, double padding_value) {
return wrapper_CPU___jagged_to_padded_dense_forward(values, offsets, c10::fromIntArrayRefSlow(max_lengths), padding_value);
}
at::Tensor _jagged_to_padded_dense_forward_symint(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value) {
return wrapper_CPU___jagged_to_padded_dense_forward(values, offsets, max_lengths, padding_value);
}
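// [Editor's note] Of the two at::cpu entry points above, only the plain
// IntArrayRef overload does work before dispatch: it widens the concrete
// lengths to SymInts via c10::fromIntArrayRefSlow, while the _symint overload
// forwards its SymIntArrayRef untouched; the CPU kernel then converts back
// with C10_AS_INTARRAYREF_SLOW. Hypothetical call (with `values` holding the
// packed jagged data and `offsets` the per-dimension offset tensors):
//
//   at::Tensor padded = at::_jagged_to_padded_dense_forward(
//       values, {offsets}, /*max_lengths=*/{8, 128}, /*padding_value=*/0.0);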
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___scaled_dot_product_flash_attention_for_cpu_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
  // No device check
  // DeviceGuard omitted
  return at::native::_scaled_dot_product_flash_attention_cpu_backward(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_scaled_dot_product_flash_attention_for_cpu_backward",
TORCH_FN(wrapper_CPU___scaled_dot_product_flash_attention_for_cpu_backward));
}
} // anonymous namespace
namespace cpu {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
return wrapper_CPU___scaled_dot_product_flash_attention_for_cpu_backward(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_airy_ai_out_functional final : public at::native::structured_special_airy_ai_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_airy_ai(const at::Tensor & x) {
structured_special_airy_ai_out_functional op;
op.meta(x);
op.impl(x, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_airy_ai_out_out final : public at::native::structured_special_airy_ai_out {
    structured_special_airy_ai_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_airy_ai_out_out(const at::Tensor & x, at::Tensor & out) {
structured_special_airy_ai_out_out op(out);
op.meta(x);
op.impl(x, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_airy_ai", TORCH_FN(wrapper_CPU_special_airy_ai));
m.impl("special_airy_ai.out", TORCH_FN(wrapper_CPU_special_airy_ai_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_airy_ai(const at::Tensor & x) {
return wrapper_CPU_special_airy_ai(x);
}
at::Tensor & special_airy_ai_out(at::Tensor & out, const at::Tensor & x) {
return wrapper_CPU_special_airy_ai_out_out(x, out);
}
at::Tensor & special_airy_ai_outf(const at::Tensor & x, at::Tensor & out) {
return wrapper_CPU_special_airy_ai_out_out(x, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_bessel_j0_out_functional final : public at::native::structured_special_bessel_j0_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_bessel_j0(const at::Tensor & self) {
structured_special_bessel_j0_out_functional op;
op.meta(self);
op.impl(self, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_bessel_j0_out_out final : public at::native::structured_special_bessel_j0_out {
    structured_special_bessel_j0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_bessel_j0_out_out(const at::Tensor & self, at::Tensor & out) {
structured_special_bessel_j0_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_bessel_j0", TORCH_FN(wrapper_CPU_special_bessel_j0));
m.impl("special_bessel_j0.out", TORCH_FN(wrapper_CPU_special_bessel_j0_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_bessel_j0(const at::Tensor & self) {
return wrapper_CPU_special_bessel_j0(self);
}
at::Tensor & special_bessel_j0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_bessel_j0_out_out(self, out);
}
at::Tensor & special_bessel_j0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_bessel_j0_out_out(self, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_chebyshev_polynomial_v_out_functional final : public at::native::structured_special_chebyshev_polynomial_v_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
structured_special_chebyshev_polynomial_v_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_chebyshev_polynomial_v_out_out final : public at::native::structured_special_chebyshev_polynomial_v_out {
    structured_special_chebyshev_polynomial_v_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_chebyshev_polynomial_v_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_chebyshev_polynomial_v_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_chebyshev_polynomial_v", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_v));
m.impl("special_chebyshev_polynomial_v.out", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_v_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_v(x, n);
}
at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_v_out_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_chebyshev_polynomial_v_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_chebyshev_polynomial_w_out_functional final : public at::native::structured_special_chebyshev_polynomial_w_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
structured_special_chebyshev_polynomial_w_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_chebyshev_polynomial_w_out_out final : public at::native::structured_special_chebyshev_polynomial_w_out {
    structured_special_chebyshev_polynomial_w_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_chebyshev_polynomial_w_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_chebyshev_polynomial_w_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_chebyshev_polynomial_w", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_w));
m.impl("special_chebyshev_polynomial_w.out", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_w_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_w(x, n);
}
at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_w_out_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_chebyshev_polynomial_w_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_hermite_polynomial_he_out_functional final : public at::native::structured_special_hermite_polynomial_he_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
structured_special_hermite_polynomial_he_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_hermite_polynomial_he_out_out final : public at::native::structured_special_hermite_polynomial_he_out {
    structured_special_hermite_polynomial_he_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_hermite_polynomial_he_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_hermite_polynomial_he_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_hermite_polynomial_he", TORCH_FN(wrapper_CPU_special_hermite_polynomial_he));
m.impl("special_hermite_polynomial_he.out", TORCH_FN(wrapper_CPU_special_hermite_polynomial_he_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_hermite_polynomial_he(x, n);
}
at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_hermite_polynomial_he_out_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_hermite_polynomial_he_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
struct structured_special_laguerre_polynomial_l_out_functional final : public at::native::structured_special_laguerre_polynomial_l_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) {
structured_special_laguerre_polynomial_l_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_laguerre_polynomial_l_out_out final : public at::native::structured_special_laguerre_polynomial_l_out {
    structured_special_laguerre_polynomial_l_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // The base-class call must come last, so that downstream code can use
        // maybe_get_output to retrieve the output
        at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_laguerre_polynomial_l_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_laguerre_polynomial_l_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_laguerre_polynomial_l", TORCH_FN(wrapper_CPU_special_laguerre_polynomial_l));
m.impl("special_laguerre_polynomial_l.out", TORCH_FN(wrapper_CPU_special_laguerre_polynomial_l_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_laguerre_polynomial_l(x, n);
}
at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_laguerre_polynomial_l_out_out(x, n, out);
}
at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_laguerre_polynomial_l_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
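// Usage sketch (illustrative only; not generated). `_out` takes `out` as the
// first argument to mirror the Python `out=` convention, while `_outf` takes
// it last; both forward to the same structured wrapper:
//
//   at::Tensor x   = at::rand({4});
//   at::Tensor n   = at::full({4}, 2.0);
//   at::Tensor out = at::empty({4});
//   at::cpu::special_laguerre_polynomial_l_out(out, x, n);   // out-first
//   at::cpu::special_laguerre_polynomial_l_outf(x, n, out);  // out-last, same kernel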
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined
// in the at namespace already.
namespace {
struct structured_special_shifted_chebyshev_polynomial_v_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_v_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // The base-class call must come last, so that downstream code can use
        // maybe_get_output to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // The base-class call must come last, so that downstream code can use
        // maybe_get_output to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return outputs_[output_idx];
    }
    std::array<Tensor, 1> outputs_;
};
at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
structured_special_shifted_chebyshev_polynomial_v_out_functional op;
op.meta(x, n);
op.impl(x, n, op.outputs_[0]);
return std::move(op.outputs_[0]);
}
struct structured_special_shifted_chebyshev_polynomial_v_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_v_out {
    structured_special_shifted_chebyshev_polynomial_v_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = std::move(maybe_proxy).value();
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // The base-class call must come last, so that downstream code can use
        // maybe_get_output to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // The base-class call must come last, so that downstream code can use
        // maybe_get_output to retrieve the output
        at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<::std::optional<Tensor>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
structured_special_shifted_chebyshev_polynomial_v_out_out op(out);
op.meta(x, n);
op.impl(x, n, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(*op.proxy_outputs_[0]);
return out;
}
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("special_shifted_chebyshev_polynomial_v", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_v));
m.impl("special_shifted_chebyshev_polynomial_v.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out));
}
} // anonymous namespace
namespace cpu {
at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_v(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out(x, n, out);
}
} // namespace cpu
} // namespace at
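// Note (illustrative only; not generated): when the caller-supplied `out`
// cannot be used directly, set_output_strided stashes a freshly allocated
// proxy in proxy_outputs_, and the wrapper copies the proxy back into `out`
// once impl() has run. From the caller's side this is invisible:
//
//   at::Tensor x   = at::rand({4});
//   at::Tensor n   = at::zeros({4});
//   at::Tensor out = at::empty({4});
//   at::cpu::special_shifted_chebyshev_polynomial_v_outf(x, n, out);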
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined
// in the at namespace already.
namespace {
namespace {
void wrapper_CPU___fused_adamw_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  // No device check: argument device consistency is not enforced for this op
  // DeviceGuard omitted: no device switch is needed for a CPU kernel
  return at::native::_fused_adamw_kernel_cpu_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fused_adamw_",
TORCH_FN(wrapper_CPU___fused_adamw_));
}
} // anonymous namespace
namespace cpu {
void _fused_adamw_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
return wrapper_CPU___fused_adamw_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // namespace cpu
} // namespace at
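// Usage sketch (illustrative only; not generated). The fused optimizer entry
// point updates its TensorList arguments in place; with a single parameter
// tensor (all names local to this sketch):
//
//   at::Tensor p    = at::rand({10});
//   at::Tensor g    = at::rand({10});
//   at::Tensor m1   = at::zeros({10});  // exp_avg
//   at::Tensor m2   = at::zeros({10});  // exp_avg_sq
//   at::Tensor step = at::zeros({});    // state_steps entry
//   at::cpu::_fused_adamw_({p}, {g}, {m1}, {m2}, /*max_exp_avg_sqs=*/{},
//                          {step}, /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
//                          /*weight_decay=*/1e-2, /*eps=*/1e-8,
//                          /*amsgrad=*/false, /*maximize=*/false,
//                          /*grad_scale=*/{}, /*found_inf=*/{});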
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined
// in the at namespace already.
namespace {
namespace {
void wrapper_CPU_tensor_lr__fused_adamw_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  // No device check: argument device consistency is not enforced for this op
  // DeviceGuard omitted: no device switch is needed for a CPU kernel
  return at::native::_fused_adamw_kernel_cpu_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fused_adamw_.tensor_lr",
TORCH_FN(wrapper_CPU_tensor_lr__fused_adamw_));
}
} // anonymous namespace
namespace cpu {
void _fused_adamw_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
return wrapper_CPU_tensor_lr__fused_adamw_(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}
} // namespace cpu
} // namespace at
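// Note (illustrative only; not generated): this overload differs from the one
// above only in accepting the learning rate as a (typically 0-dim) Tensor, so
// `lr` can be updated in place between steps; overload resolution selects it
// whenever an at::Tensor is passed. Reusing the names from the sketch above:
//
//   at::Tensor lr = at::full({}, 1e-3);
//   at::cpu::_fused_adamw_({p}, {g}, {m1}, {m2}, {}, {step}, lr,
//                          0.9, 0.999, 1e-2, 1e-8, false, false, {}, {});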
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined
// in the at namespace already.
namespace {
namespace {
void wrapper_CPU___fused_adagrad_(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  // No device check: argument device consistency is not enforced for this op
  // DeviceGuard omitted: no device switch is needed for a CPU kernel
  return at::native::_fused_adagrad_kernel_cpu_(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("_fused_adagrad_",
TORCH_FN(wrapper_CPU___fused_adagrad_));
}
} // anonymous namespace
namespace cpu {
void _fused_adagrad_(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
return wrapper_CPU___fused_adagrad_(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
}
} // namespace cpu
} // namespace at
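// Usage sketch (illustrative only; not generated), with names local to this
// sketch:
//
//   at::Tensor p    = at::rand({10});
//   at::Tensor g    = at::rand({10});
//   at::Tensor sum  = at::zeros({10});  // state_sums entry
//   at::Tensor step = at::zeros({});    // state_steps entry
//   at::cpu::_fused_adagrad_({p}, {g}, {sum}, {step}, /*lr=*/1e-2,
//                            /*lr_decay=*/0.0, /*weight_decay=*/0.0,
//                            /*eps=*/1e-10, /*maximize=*/false,
//                            /*grad_scale=*/{}, /*found_inf=*/{});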
