// @generated by tools/setup_helpers/generate_code.py from DispatchKeyNativeFunctions.cpp
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/csrc/lazy/core/shape_inference.h>
#include <ATen/Functions.h>
#include <ATen/native/TensorConversions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/CompositeExplicitAutogradNonFunctionalFunctions.h>
#include <ATen/MetaFunctions.h>
#include <ATen/Operators.h>
#include <ATen/native/CPUFallback.h>
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/core/lazy_graph_executor.h>
#include <torch/csrc/lazy/core/metrics.h>
#include <torch/csrc/lazy/core/shape.h>
#include <torch/csrc/lazy/generated/LazyNativeFunctions.h>
#include <torch/csrc/lazy/generated/LazyIr.h>
#include <torch/csrc/lazy/ts_backend/ts_eager_fallback.h>


namespace {
// Mirror `tensor` on the meta device: same sizes/strides/dtype/layout, but no
// storage. Used below to run meta kernels for shape inference.
at::Tensor to_meta(const at::Tensor& tensor) {
  // An undefined tensor has no sizes/strides, so it cannot be materialized on
  // the meta device; pass it through unchanged.
  if (!tensor.defined()) {
    return tensor;
  }
  auto meta_out = at::native::empty_strided_meta_symint(
      tensor.sym_sizes(),
      tensor.sym_strides(),
      /*dtype=*/tensor.scalar_type(),
      /*layout=*/tensor.layout(),
      /*device=*/c10::Device(c10::kMeta),
      /*pin_memory=*/std::nullopt);
  // Propagate the wrapped-number bit so dtype promotion in meta kernels
  // matches what the real computation would do.
  if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
    meta_out.unsafeGetTensorImpl()->set_wrapped_number(true);
  }
  return meta_out;
}
// Optional-lifted variant: converts the contained tensor when present,
// otherwise stays empty.
std::optional<at::Tensor> to_meta(const std::optional<at::Tensor>& tensor) {
  if (!tensor.has_value()) {
    return std::nullopt;
  }
  return to_meta(*tensor);
}

// List variant: converts every tensor in the list to its meta-device mirror,
// preserving order.
std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
  std::vector<at::Tensor> converted;
  converted.reserve(t_list.size());
  for (const auto& t : t_list) {
    converted.push_back(to_meta(t));
  }
  return converted;
}
} // namespace

namespace torch {
namespace lazy {

    // Lazy kernel for aten::_adaptive_avg_pool2d: records an IR node instead of
    // computing eagerly; the backend materializes the result later.
    at::Tensor LazyNativeFunctions::_adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::_adaptive_avg_pool2d)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_adaptive_avg_pool2d)>::call(
                self,
                c10::fromIntArrayRefSlow(output_size)
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor inputs must resolve to a single backend device.
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<AdaptiveAvgPool2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()));
        if (!node) {
            
            // Output shape comes from the hand-written shape-inference helper.
            auto shapes = torch::lazy::compute_shape__adaptive_avg_pool2d(self, output_size);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, output_size };
                const char* schema_str = "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<AdaptiveAvgPool2d>(lazy_self->GetIrValue(), std::vector<int64_t>(output_size.begin(), output_size.end()), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::_adaptive_avg_pool2d_backward: records an IR node
    // rather than computing the gradient eagerly.
    at::Tensor LazyNativeFunctions::_adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::_adaptive_avg_pool2d_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_adaptive_avg_pool2d_backward)>::call(
                grad_output,
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // Both tensor inputs must resolve to the same backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<AdaptiveAvgPool2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue());
        if (!node) {
            
            // Output shape comes from the hand-written shape-inference helper.
            auto shapes = torch::lazy::compute_shape__adaptive_avg_pool2d_backward(grad_output, self);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { grad_output, self };
                const char* schema_str = "aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<AdaptiveAvgPool2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::_log_softmax. Shape inference here runs the meta
    // kernel on a meta-device mirror of the input (no real compute).
    at::Tensor LazyNativeFunctions::_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::_log_softmax)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_log_softmax)>::call(
                self,
                dim,
                half_to_float
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<LogSoftmax>(lazy_self->GetIrValue(), dim, half_to_float);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto out_meta = at::meta::_log_softmax(self_meta, dim, half_to_float);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, dim, half_to_float };
                const char* schema_str = "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<LogSoftmax>(lazy_self->GetIrValue(), dim, half_to_float, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::_log_softmax_backward_data. Shape inference runs
    // the meta kernel on meta-device mirrors of the inputs.
    at::Tensor LazyNativeFunctions::_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::_log_softmax_backward_data)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_log_softmax_backward_data)>::call(
                grad_output,
                output,
                dim,
                input_dtype
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // Both tensor inputs must resolve to the same backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_output, output);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<LogSoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto out_meta = at::meta::_log_softmax_backward_data(grad_output_meta, output_meta, dim, input_dtype);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { grad_output, output, dim, input_dtype };
                const char* schema_str = "aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<LogSoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::_reshape_alias_copy (SymInt variant). Shape
    // inference runs the CompositeExplicitAutogradNonFunctional kernel on a
    // meta-device mirror of the input.
    at::Tensor LazyNativeFunctions::_reshape_alias_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::_reshape_alias_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_reshape_alias_copy)>::call(
                self,
                size,
                stride
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ReshapeAliasCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride));
        if (!node) {
                    // Infer the output shape/dtype by running the composite kernel on meta.
                    auto self_meta = to_meta(self);
        auto out_meta = at::compositeexplicitautogradnonfunctional::_reshape_alias_copy_symint(self_meta, size, stride);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, size, stride };
                const char* schema_str = "aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<ReshapeAliasCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::_softmax. Shape inference runs the meta kernel on
    // a meta-device mirror of the input (no real compute).
    at::Tensor LazyNativeFunctions::_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::_softmax)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_softmax)>::call(
                self,
                dim,
                half_to_float
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Softmax>(lazy_self->GetIrValue(), dim, half_to_float);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto out_meta = at::meta::_softmax(self_meta, dim, half_to_float);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, dim, half_to_float };
                const char* schema_str = "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<Softmax>(lazy_self->GetIrValue(), dim, half_to_float, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::_softmax_backward_data. Shape inference runs the
    // meta kernel on meta-device mirrors of the inputs.
    at::Tensor LazyNativeFunctions::_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::_softmax_backward_data)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(_softmax_backward_data)>::call(
                grad_output,
                output,
                dim,
                input_dtype
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // Both tensor inputs must resolve to the same backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_output, output);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<SoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto out_meta = at::meta::_softmax_backward_data(grad_output_meta, output_meta, dim, input_dtype);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { grad_output, output, dim, input_dtype };
                const char* schema_str = "aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<SoftmaxBackwardData>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), dim, input_dtype, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::abs: records an IR node instead of computing.
    at::Tensor LazyNativeFunctions::abs(const at::Tensor & self) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::abs)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(abs)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Abs>(lazy_self->GetIrValue());
        if (!node) {
            
            // Output shape comes from the hand-written shape-inference helper.
            auto shapes = torch::lazy::compute_shape_abs(self);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::abs(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<Abs>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::add.Tensor. The scalar `alpha` is lifted into an
    // IR value so it participates in node caching.
    at::Tensor LazyNativeFunctions::add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::add)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(add, Tensor)>::call(
                self,
                other,
                alpha
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // Both tensor inputs must resolve to the same backend device.
        auto common_device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *common_device);
        // Lift the scalar into an IR value on the common device.
        auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(alpha, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<AddTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), node_alpha);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = at::meta::add(self_meta, other_meta, alpha);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, other, alpha };
                const char* schema_str = "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<AddTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), node_alpha, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::addcdiv. The scalar `value` is lifted into an IR
    // value so it participates in node caching.
    at::Tensor LazyNativeFunctions::addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::addcdiv)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(addcdiv)>::call(
                self,
                tensor1,
                tensor2,
                value
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor inputs must resolve to the same backend device.
        auto common_device = torch::lazy::GetBackendDevice(self, tensor1, tensor2);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_tensor1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor1, *common_device);
        LazyTensorPtr lazy_tensor2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor2, *common_device);
        // Lift the scalar into an IR value on the common device.
        auto node_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(value, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Addcdiv>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = at::meta::addcdiv(self_meta, tensor1_meta, tensor2_meta, value);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, tensor1, tensor2, value };
                const char* schema_str = "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<Addcdiv>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::addcmul. Mirrors the addcdiv kernel above with a
    // different IR node type.
    at::Tensor LazyNativeFunctions::addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::addcmul)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(addcmul)>::call(
                self,
                tensor1,
                tensor2,
                value
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor inputs must resolve to the same backend device.
        auto common_device = torch::lazy::GetBackendDevice(self, tensor1, tensor2);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_tensor1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor1, *common_device);
        LazyTensorPtr lazy_tensor2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(tensor2, *common_device);
        // Lift the scalar into an IR value on the common device.
        auto node_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(value, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Addcmul>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = at::meta::addcmul(self_meta, tensor1_meta, tensor2_meta, value);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, tensor1, tensor2, value };
                const char* schema_str = "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<Addcmul>(lazy_self->GetIrValue(), lazy_tensor1->GetIrValue(), lazy_tensor2->GetIrValue(), node_value, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::addmm. Both scalars (`beta`, `alpha`) are lifted
    // into IR values so they participate in node caching.
    at::Tensor LazyNativeFunctions::addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::addmm)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(addmm)>::call(
                self,
                mat1,
                mat2,
                beta,
                alpha
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor inputs must resolve to the same backend device.
        auto common_device = torch::lazy::GetBackendDevice(self, mat1, mat2);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_mat1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat1, *common_device);
        LazyTensorPtr lazy_mat2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat2, *common_device);
        // Lift the scalars into IR values on the common device.
        auto node_beta = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(beta, *common_device);
        auto node_alpha = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(alpha, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Addmm>(lazy_self->GetIrValue(), lazy_mat1->GetIrValue(), lazy_mat2->GetIrValue(), node_beta, node_alpha);
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = at::meta::addmm(self_meta, mat1_meta, mat2_meta, beta, alpha);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, mat1, mat2, beta, alpha };
                const char* schema_str = "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<Addmm>(lazy_self->GetIrValue(), lazy_mat1->GetIrValue(), lazy_mat2->GetIrValue(), node_beta, node_alpha, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::alias_copy. Shape inference runs the
    // CompositeExplicitAutogradNonFunctional kernel on a meta-device mirror.
    at::Tensor LazyNativeFunctions::alias_copy(const at::Tensor & self) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::alias_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(alias_copy)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<AliasCopy>(lazy_self->GetIrValue());
        if (!node) {
                    // Infer the output shape/dtype by running the composite kernel on meta.
                    auto self_meta = to_meta(self);
        auto out_meta = at::compositeexplicitautogradnonfunctional::alias_copy(self_meta);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::alias_copy(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<AliasCopy>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::all (full reduction overload). Shape inference
    // runs the meta kernel on a meta-device mirror of the input.
    at::Tensor LazyNativeFunctions::all(const at::Tensor & self) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::all)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(all)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<All>(lazy_self->GetIrValue());
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto out_meta = at::meta::all(self_meta);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::all(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<All>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::any (full reduction overload). Mirrors the `all`
    // kernel above with a different IR node type.
    at::Tensor LazyNativeFunctions::any(const at::Tensor & self) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::any)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(any)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Any>(lazy_self->GetIrValue());
        if (!node) {
                    // Infer the output shape/dtype by running the meta kernel.
                    auto self_meta = to_meta(self);
        auto out_meta = at::meta::any(self_meta);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::any(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<Any>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node in a lazy tensor bound to the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::arange.start_out (in-place `out` variant): the new
    // IR node is installed into `out`'s lazy tensor rather than creating a new
    // tensor, and `out` itself is returned.
    at::Tensor & LazyNativeFunctions::arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
        
        // Escape hatch: if this op is force-listed, run it via the eager fallback.
        if (force_eager_fallback(at::aten::arange)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(arange, start_out)>::call(
                start,
                end,
                step,
                out
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // The device is taken from the only tensor argument, `out`.
        auto common_device = torch::lazy::GetBackendDevice(out);
        TORCH_INTERNAL_ASSERT(common_device);
        
        // Lift each scalar into an IR value on the common device.
        auto node_start = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(start, *common_device);
        auto node_end = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(end, *common_device);
        auto node_step = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(step, *common_device);
        LazyTensorPtr lazy_out = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(out, *common_device);
        // First try to reuse a previously built node with identical operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ArangeStartOut>(node_start, node_end, node_step, lazy_out->GetIrValue());
        if (!node) {
            
            // Output shape comes from the hand-written shape-inference helper.
            auto shapes = torch::lazy::compute_shape_arange_out(start, end, step, out);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { start, end, step, out };
                const char* schema_str = "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // Build a fresh IR node and cache it for future reuse.
            node = torch::lazy::MakeNode<ArangeStartOut>(node_start, node_end, node_step, lazy_out->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // In-place semantics: point `out`'s lazy tensor at the new node.
        lazy_out->SetInPlaceIrValue(node);
        auto& result = out;
        return result;
    }

    
    // Lazy lowering of aten::as_strided_copy: builds an AsStridedCopy IR node,
    // using meta-tensor evaluation for output shape inference.
    at::Tensor LazyNativeFunctions::as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::as_strided_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(as_strided_copy)>::call(self, size, stride, storage_offset);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, storage_offset);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Convert the optional SymInt offset once; used for both node lookup and creation.
        auto offset_val = storage_offset ? std::make_optional(GetSymIntValue(*storage_offset)) : ::std::nullopt;

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<AsStridedCopy>(
            lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), offset_val);
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::as_strided_copy_symint(self_meta, size, stride, storage_offset);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, size, stride, storage_offset };
                applySymbolicShapesOnLT(
                    "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<AsStridedCopy>(
                lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), offset_val, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::as_strided_scatter: records an AsStridedScatter IR
    // node over `self` and `src`, with shape inference via compute_shape_*.
    at::Tensor LazyNativeFunctions::as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::as_strided_scatter)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(as_strided_scatter)>::call(self, src, size, stride, storage_offset);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, src, storage_offset);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *device);
        // Convert the optional SymInt offset once; used for both node lookup and creation.
        auto offset_val = storage_offset ? std::make_optional(GetSymIntValue(*storage_offset)) : ::std::nullopt;

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<AsStridedScatter>(
            lazy_self->GetIrValue(), lazy_src->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), offset_val);
        if (!ir_node) {
            auto out_shapes = torch::lazy::compute_shape_as_strided_scatter_symint(self, src, size, stride, storage_offset);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, src, size, stride, storage_offset };
                applySymbolicShapesOnLT(
                    "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<AsStridedScatter>(
                lazy_self->GetIrValue(), lazy_src->GetIrValue(), GetSymIntArrayRefValue(size), GetSymIntArrayRefValue(stride), offset_val, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::avg_pool2d: records an AvgPool2d IR node, using the
    // meta kernel for output shape inference.
    at::Tensor LazyNativeFunctions::avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::avg_pool2d)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(avg_pool2d)>::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Materialize the array-ref arguments once; used for both node lookup and creation.
        std::vector<int64_t> kernel_vec(kernel_size.begin(), kernel_size.end());
        std::vector<int64_t> stride_vec(stride.begin(), stride.end());
        std::vector<int64_t> padding_vec(padding.begin(), padding.end());

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<AvgPool2d>(
            lazy_self->GetIrValue(), kernel_vec, stride_vec, padding_vec, ceil_mode, count_include_pad, divisor_override);
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::avg_pool2d(self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override };
                applySymbolicShapesOnLT(
                    "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<AvgPool2d>(
                lazy_self->GetIrValue(), kernel_vec, stride_vec, padding_vec, ceil_mode, count_include_pad, divisor_override, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::avg_pool2d_backward: records an AvgPool2dBackward IR
    // node, using the meta kernel for output shape inference.
    at::Tensor LazyNativeFunctions::avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::avg_pool2d_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(avg_pool2d_backward)>::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Materialize the array-ref arguments once; used for both node lookup and creation.
        std::vector<int64_t> kernel_vec(kernel_size.begin(), kernel_size.end());
        std::vector<int64_t> stride_vec(stride.begin(), stride.end());
        std::vector<int64_t> padding_vec(padding.begin(), padding.end());

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<AvgPool2dBackward>(
            lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), kernel_vec, stride_vec, padding_vec, ceil_mode, count_include_pad, divisor_override);
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto grad_output_meta = to_meta(grad_output);
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::avg_pool2d_backward(grad_output_meta, self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override };
                applySymbolicShapesOnLT(
                    "aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<AvgPool2dBackward>(
                lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), kernel_vec, stride_vec, padding_vec, ceil_mode, count_include_pad, divisor_override, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::baddbmm: records a Baddbmm IR node over the three
    // tensor inputs plus scalar beta/alpha IR values.
    at::Tensor LazyNativeFunctions::baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::baddbmm)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(baddbmm)>::call(self, batch1, batch2, beta, alpha);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, batch1, batch2);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_batch1 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(batch1, *device);
        LazyTensorPtr lazy_batch2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(batch2, *device);
        // Lower the scalar multipliers into IR values on the resolved device.
        auto beta_val = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(beta, *device);
        auto alpha_val = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(alpha, *device);

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Baddbmm>(
            lazy_self->GetIrValue(), lazy_batch1->GetIrValue(), lazy_batch2->GetIrValue(), beta_val, alpha_val);
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto batch1_meta = to_meta(batch1);
            auto batch2_meta = to_meta(batch2);
            auto out_meta = at::meta::baddbmm(self_meta, batch1_meta, batch2_meta, beta, alpha);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, batch1, batch2, beta, alpha };
                applySymbolicShapesOnLT(
                    "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Baddbmm>(
                lazy_self->GetIrValue(), lazy_batch1->GetIrValue(), lazy_batch2->GetIrValue(), beta_val, alpha_val, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::bernoulli: records a Bernoulli IR node, with shape
    // inference delegated to compute_shape_bernoulli.
    at::Tensor LazyNativeFunctions::bernoulli(const at::Tensor & self, ::std::optional<at::Generator> generator) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::bernoulli)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(bernoulli)>::call(self, generator);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Bernoulli>(lazy_self->GetIrValue(), generator);
        if (!ir_node) {
            auto out_shapes = torch::lazy::compute_shape_bernoulli(self, generator);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, generator };
                applySymbolicShapesOnLT(
                    "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Bernoulli>(lazy_self->GetIrValue(), generator, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::bernoulli.p (explicit probability overload):
    // records a BernoulliP IR node carrying `p` and the optional generator.
    at::Tensor LazyNativeFunctions::bernoulli(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::bernoulli)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(bernoulli, p)>::call(self, p, generator);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<BernoulliP>(lazy_self->GetIrValue(), p, generator);
        if (!ir_node) {
            auto out_shapes = torch::lazy::compute_shape_bernoulli(self, p, generator);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, p, generator };
                applySymbolicShapesOnLT(
                    "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<BernoulliP>(lazy_self->GetIrValue(), p, generator, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::binary_cross_entropy: records a BinaryCrossEntropy
    // IR node; the optional weight tensor is lowered only when present.
    at::Tensor LazyNativeFunctions::binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::binary_cross_entropy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(binary_cross_entropy)>::call(self, target, weight, reduction);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, target, weight);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *device);
        // A missing weight resolves to a null LazyTensorPtr, which in turn maps to
        // an empty optional IR value below.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        auto weight_val = lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt;

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<BinaryCrossEntropy>(
            lazy_self->GetIrValue(), lazy_target->GetIrValue(), weight_val, reduction);
        if (!ir_node) {
            auto out_shapes = torch::lazy::compute_shape_binary_cross_entropy(self, target, weight, reduction);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, target, weight, reduction };
                applySymbolicShapesOnLT(
                    "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<BinaryCrossEntropy>(
                lazy_self->GetIrValue(), lazy_target->GetIrValue(), weight_val, reduction, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::binary_cross_entropy_backward: records a
    // BinaryCrossEntropyBackward IR node; optional weight handled as in forward.
    at::Tensor LazyNativeFunctions::binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::binary_cross_entropy_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(binary_cross_entropy_backward)>::call(grad_output, self, target, weight, reduction);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self, target, weight);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *device);
        // A missing weight resolves to a null LazyTensorPtr / empty optional IR value.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        auto weight_val = lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt;

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<BinaryCrossEntropyBackward>(
            lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), weight_val, reduction);
        if (!ir_node) {
            auto out_shapes = torch::lazy::compute_shape_binary_cross_entropy_backward(grad_output, self, target, weight, reduction);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, self, target, weight, reduction };
                applySymbolicShapesOnLT(
                    "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<BinaryCrossEntropyBackward>(
                lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), weight_val, reduction, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::bitwise_and.Tensor: records a BitwiseAndTensor IR
    // node, using the meta kernel for output shape inference.
    at::Tensor LazyNativeFunctions::bitwise_and(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::bitwise_and)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(bitwise_and, Tensor)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<BitwiseAndTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::bitwise_and(self_meta, other_meta);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                applySymbolicShapesOnLT(
                    "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<BitwiseAndTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::bitwise_or.Tensor: records a BitwiseOrTensor IR
    // node, using the meta kernel for output shape inference.
    at::Tensor LazyNativeFunctions::bitwise_or(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::bitwise_or)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(bitwise_or, Tensor)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<BitwiseOrTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::bitwise_or(self_meta, other_meta);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                applySymbolicShapesOnLT(
                    "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<BitwiseOrTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::bmm: records a Bmm IR node, using the meta kernel
    // for output shape inference.
    at::Tensor LazyNativeFunctions::bmm(const at::Tensor & self, const at::Tensor & mat2) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::bmm)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(bmm)>::call(self, mat2);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, mat2);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_mat2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat2, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Bmm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue());
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto mat2_meta = to_meta(mat2);
            auto out_meta = at::meta::bmm(self_meta, mat2_meta);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, mat2 };
                applySymbolicShapesOnLT(
                    "aten::bmm(Tensor self, Tensor mat2) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Bmm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::cat: records a Cat IR node over the whole tensor
    // list, using the meta kernel for output shape inference.
    at::Tensor LazyNativeFunctions::cat(const at::ITensorListRef & tensors, int64_t dim) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::cat)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(cat)>::call(tensors, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(tensors);
        TORCH_INTERNAL_ASSERT(device);

        // Convert the input list into a single IR tensor-list value.
        auto tensor_values = torch::lazy::GetTensorList(tensors);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Cat>(tensor_values, dim);
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto meta_list = to_meta(tensors);
            auto out_meta = at::meta::cat(meta_list, dim);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { tensors, dim };
                applySymbolicShapesOnLT(
                    "aten::cat(Tensor[] tensors, int dim=0) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Cat>(tensor_values, dim, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::clamp: records a Clamp IR node; each optional
    // scalar bound is lowered to an IR value only when present.
    at::Tensor LazyNativeFunctions::clamp(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::clamp)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(clamp)>::call(self, min, max);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower the optional scalar bounds; absent bounds stay empty optionals.
        auto min_val = min
            ? std::make_optional(torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(*min, *device))
            : ::std::nullopt;
        auto max_val = max
            ? std::make_optional(torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(*max, *device))
            : ::std::nullopt;

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Clamp>(lazy_self->GetIrValue(), min_val, max_val);
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::clamp(self_meta, min, max);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, min, max };
                applySymbolicShapesOnLT(
                    "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Clamp>(lazy_self->GetIrValue(), min_val, max_val, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::clamp_min: records a ClampMin IR node with the
    // scalar bound lowered to an IR value.
    at::Tensor LazyNativeFunctions::clamp_min(const at::Tensor & self, const at::Scalar & min) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::clamp_min)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(clamp_min)>::call(self, min);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto min_val = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(min, *device);

        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<ClampMin>(lazy_self->GetIrValue(), min_val);
        if (!ir_node) {
            // Infer the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::clamp_min(self_meta, min);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, min };
                applySymbolicShapesOnLT(
                    "aten::clamp_min(Tensor self, Scalar min) -> Tensor",
                    inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<ClampMin>(lazy_self->GetIrValue(), min_val, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::constant_pad_nd)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(constant_pad_nd)>::call(
                self,
                c10::fromIntArrayRefSlow(pad),
                value
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower the fill scalar into an IR value on the same backend device.
        auto value_ir = torch::lazy::LazyGraphExecutor::Get()->
                GetIrValueForScalarFromCodegen(value, *device);
        // Materialize the pad list once; the node stores it by value.
        std::vector<int64_t> pad_vec(pad.begin(), pad.end());
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<ConstantPadNd>(self_lt->GetIrValue(), pad_vec, value_ir);
        if (ir_node == nullptr) {
            // Cache miss: use the hand-written shape function for this op.
            auto shapes = torch::lazy::compute_shape_constant_pad_nd(self, pad, value);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, pad, value };
                const char* schema_str = "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<ConstantPadNd>(self_lt->GetIrValue(), pad_vec, value_ir, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::convolution)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(convolution)>::call(
                input,
                weight,
                bias,
                c10::fromIntArrayRefSlow(stride),
                c10::fromIntArrayRefSlow(padding),
                c10::fromIntArrayRefSlow(dilation),
                transposed,
                c10::fromIntArrayRefSlow(output_padding),
                groups
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(input, weight, bias);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr input_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *device);
        LazyTensorPtr weight_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(weight, *device);
        // bias is optional: only an already-lazy tensor yields an IR value here.
        LazyTensorPtr bias_lt = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
        auto bias_ir = bias_lt ? std::make_optional(bias_lt->GetIrValue()) : ::std::nullopt;
        // Materialize the int lists once; the node stores them by value.
        std::vector<int64_t> stride_vec(stride.begin(), stride.end());
        std::vector<int64_t> padding_vec(padding.begin(), padding.end());
        std::vector<int64_t> dilation_vec(dilation.begin(), dilation.end());
        std::vector<int64_t> output_padding_vec(output_padding.begin(), output_padding.end());
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Convolution>(input_lt->GetIrValue(), weight_lt->GetIrValue(), bias_ir, stride_vec, padding_vec, dilation_vec, transposed, output_padding_vec, groups);
        if (ir_node == nullptr) {
            // Cache miss: use the hand-written shape function for this op.
            auto shapes = torch::lazy::compute_shape_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { input, weight, bias, stride, padding, dilation, transposed, output_padding, groups };
                const char* schema_str = "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<Convolution>(input_lt->GetIrValue(), weight_lt->GetIrValue(), bias_ir, stride_vec, padding_vec, dilation_vec, transposed, output_padding_vec, groups, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy lowering of aten::convolution_backward. Builds (or reuses) a single
    // ConvolutionBackward IR node with three outputs and wraps each output as
    // its own lazy tensor before returning them as a tuple.
    ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::convolution_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(convolution_backward)>::call(
                grad_output,
                input,
                weight,
                bias_sizes.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : ::std::nullopt,
                c10::fromIntArrayRefSlow(stride),
                c10::fromIntArrayRefSlow(padding),
                c10::fromIntArrayRefSlow(dilation),
                transposed,
                c10::fromIntArrayRefSlow(output_padding),
                groups,
                output_mask
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(grad_output, input, weight);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
        LazyTensorPtr lazy_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(weight, *common_device);
        // Try to reuse a structurally identical node before building a new one.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ConvolutionBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_weight->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(bias_sizes), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), transposed, std::vector<int64_t>(output_padding.begin(), output_padding.end()), groups, std::vector<bool>(output_mask.begin(), output_mask.end()));
        if (!node) {
            // Cache miss: use the hand-written shape function, which yields one
            // shape per output of the (Tensor, Tensor, Tensor) schema.
            auto shapes = torch::lazy::compute_shape_convolution_backward(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
            TORCH_INTERNAL_ASSERT(shapes.size() == 3);
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask };
                const char* schema_str = "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<ConvolutionBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_weight->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(bias_sizes), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), transposed, std::vector<int64_t>(output_padding.begin(), output_padding.end()), groups, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap each of the node's three outputs as a lazy tensor.
        std::vector<LazyTensorPtr> lazy_tensors;
        lazy_tensors.reserve(3);  // exactly three outputs; avoid reallocation
        for (int i = 0; i < 3; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
        return result;
    }

    
    at::Tensor LazyNativeFunctions::cos(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::cos)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(cos)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Cos>(self_lt->GetIrValue());
        if (ir_node == nullptr) {
            // Cache miss: run the meta kernel to infer output dtype and sizes.
            auto meta_out = at::meta::cos(to_meta(self));
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::cos(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<Cos>(self_lt->GetIrValue(), std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::cumsum(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::cumsum)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(cumsum)>::call(
                self,
                dim,
                dtype
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Cumsum>(self_lt->GetIrValue(), dim, dtype);
        if (ir_node == nullptr) {
            // Cache miss: run the meta kernel to infer output dtype and sizes.
            auto meta_out = at::meta::cumsum(to_meta(self), dim, dtype);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim, dtype };
                const char* schema_str = "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<Cumsum>(self_lt->GetIrValue(), dim, dtype, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::detach_copy(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::detach_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(detach_copy)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<DetachCopy>(self_lt->GetIrValue());
        if (ir_node == nullptr) {
            // Cache miss: run the composite-explicit-autograd-nonfunctional kernel
            // on meta tensors to infer output dtype and sizes.
            auto meta_out = at::compositeexplicitautogradnonfunctional::detach_copy(to_meta(self));
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::detach_copy(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<DetachCopy>(self_lt->GetIrValue(), std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::diagonal_copy(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::diagonal_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(diagonal_copy)>::call(
                self,
                offset,
                dim1,
                dim2
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<DiagonalCopy>(self_lt->GetIrValue(), offset, dim1, dim2);
        if (ir_node == nullptr) {
            // Cache miss: run the composite-explicit-autograd-nonfunctional kernel
            // on meta tensors to infer output dtype and sizes.
            auto meta_out = at::compositeexplicitautogradnonfunctional::diagonal_copy(to_meta(self), offset, dim1, dim2);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, offset, dim1, dim2 };
                const char* schema_str = "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<DiagonalCopy>(self_lt->GetIrValue(), offset, dim1, dim2, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::diagonal_scatter)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(diagonal_scatter)>::call(
                self,
                src,
                offset,
                dim1,
                dim2
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, src);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr src_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<DiagonalScatter>(self_lt->GetIrValue(), src_lt->GetIrValue(), offset, dim1, dim2);
        if (ir_node == nullptr) {
            // Cache miss: use the hand-written shape function for this op.
            auto shapes = torch::lazy::compute_shape_diagonal_scatter(self, src, offset, dim1, dim2);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, src, offset, dim1, dim2 };
                const char* schema_str = "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<DiagonalScatter>(self_lt->GetIrValue(), src_lt->GetIrValue(), offset, dim1, dim2, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::div(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::div)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(div, Tensor)>::call(
                self,
                other
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr other_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<DivTensor>(self_lt->GetIrValue(), other_lt->GetIrValue());
        if (ir_node == nullptr) {
            // Cache miss: run the meta kernel to infer output dtype and sizes
            // (this also accounts for type promotion between the operands).
            auto meta_out = at::meta::div(to_meta(self), to_meta(other));
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                const char* schema_str = "aten::div.Tensor(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<DivTensor>(self_lt->GetIrValue(), other_lt->GetIrValue(), std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::div(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::div)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(div, Tensor_mode)>::call(
                self,
                other,
                rounding_mode
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr other_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<DivTensorMode>(self_lt->GetIrValue(), other_lt->GetIrValue(), rounding_mode);
        if (ir_node == nullptr) {
            // Cache miss: run the meta kernel to infer output dtype and sizes
            // (this also accounts for type promotion between the operands).
            auto meta_out = at::meta::div(to_meta(self), to_meta(other), rounding_mode);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other, rounding_mode };
                const char* schema_str = "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<DivTensorMode>(self_lt->GetIrValue(), other_lt->GetIrValue(), rounding_mode, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::elu(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::elu)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(elu)>::call(
                self,
                alpha,
                scale,
                input_scale
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower each scalar argument into an IR value on the same backend device.
        auto* executor = torch::lazy::LazyGraphExecutor::Get();
        auto alpha_ir = executor->GetIrValueForScalarFromCodegen(alpha, *device);
        auto scale_ir = executor->GetIrValueForScalarFromCodegen(scale, *device);
        auto input_scale_ir = executor->GetIrValueForScalarFromCodegen(input_scale, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Elu>(self_lt->GetIrValue(), alpha_ir, scale_ir, input_scale_ir);
        if (ir_node == nullptr) {
            // Cache miss: run the meta kernel to infer output dtype and sizes.
            auto meta_out = at::meta::elu(to_meta(self), alpha, scale, input_scale);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, alpha, scale, input_scale };
                const char* schema_str = "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<Elu>(self_lt->GetIrValue(), alpha_ir, scale_ir, input_scale_ir, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::elu_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(elu_backward)>::call(
                grad_output,
                alpha,
                scale,
                input_scale,
                is_result,
                self_or_result
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self_or_result);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr grad_output_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        // Lower each scalar argument into an IR value on the same backend device.
        auto* executor = torch::lazy::LazyGraphExecutor::Get();
        auto alpha_ir = executor->GetIrValueForScalarFromCodegen(alpha, *device);
        auto scale_ir = executor->GetIrValueForScalarFromCodegen(scale, *device);
        auto input_scale_ir = executor->GetIrValueForScalarFromCodegen(input_scale, *device);
        LazyTensorPtr self_or_result_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self_or_result, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<EluBackward>(grad_output_lt->GetIrValue(), alpha_ir, scale_ir, input_scale_ir, is_result, self_or_result_lt->GetIrValue());
        if (ir_node == nullptr) {
            // Cache miss: run the meta kernel to infer output dtype and sizes.
            auto meta_out = at::meta::elu_backward(to_meta(grad_output), alpha, scale, input_scale, is_result, to_meta(self_or_result));
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(meta_out.scalar_type(), meta_out.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, alpha, scale, input_scale, is_result, self_or_result };
                const char* schema_str = "aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<EluBackward>(grad_output_lt->GetIrValue(), alpha_ir, scale_ir, input_scale_ir, is_result, self_or_result_lt->GetIrValue(), std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::embedding)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(embedding)>::call(
                weight,
                indices,
                padding_idx,
                scale_grad_by_freq,
                sparse
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(weight, indices);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr weight_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(weight, *device);
        LazyTensorPtr indices_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(indices, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Embedding>(weight_lt->GetIrValue(), indices_lt->GetIrValue(), padding_idx, scale_grad_by_freq, sparse);
        if (ir_node == nullptr) {
            // Cache miss: use the hand-written shape function for this op.
            auto shapes = torch::lazy::compute_shape_embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { weight, indices, padding_idx, scale_grad_by_freq, sparse };
                const char* schema_str = "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<Embedding>(weight_lt->GetIrValue(), indices_lt->GetIrValue(), padding_idx, scale_grad_by_freq, sparse, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::embedding_dense_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(embedding_dense_backward)>::call(
                grad_output,
                indices,
                num_weights,
                padding_idx,
                scale_grad_by_freq
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, indices);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr grad_output_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr indices_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(indices, *device);
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<EmbeddingDenseBackward>(grad_output_lt->GetIrValue(), indices_lt->GetIrValue(), num_weights, padding_idx, scale_grad_by_freq);
        if (ir_node == nullptr) {
            // Cache miss: use the hand-written shape function for this op.
            auto shapes = torch::lazy::compute_shape_embedding_dense_backward(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, indices, num_weights, padding_idx, scale_grad_by_freq };
                const char* schema_str = "aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<EmbeddingDenseBackward>(grad_output_lt->GetIrValue(), indices_lt->GetIrValue(), num_weights, padding_idx, scale_grad_by_freq, std::move(shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy-tensor dispatch for aten::eq.Scalar: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::eq(const at::Tensor & self, const at::Scalar & other) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::eq)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(eq, Scalar)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto node_other = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(other, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<EqScalar>(lazy_self->GetIrValue(), node_other);
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::eq(self_meta, other);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                applySymbolicShapesOnLT("aten::eq.Scalar(Tensor self, Scalar other) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<EqScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::eq.Tensor: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::eq(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::eq)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(eq, Tensor)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<EqTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::eq(self_meta, other_meta);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                applySymbolicShapesOnLT("aten::eq.Tensor(Tensor self, Tensor other) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<EqTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::exp: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::exp(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::exp)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(exp)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Exp>(lazy_self->GetIrValue());
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::exp(self_meta);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::exp(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Exp>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::expand_copy (SymInt overload): records an
    // IR node instead of computing eagerly.
    at::Tensor LazyNativeFunctions::expand_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::expand_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(expand_copy)>::call(self, size, implicit);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ExpandCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), implicit);
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::expand_copy_symint(self_meta, size, implicit);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, size, implicit };
                applySymbolicShapesOnLT("aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<ExpandCopy>(lazy_self->GetIrValue(), GetSymIntArrayRefValue(size), implicit, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::flip: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::flip(const at::Tensor & self, at::IntArrayRef dims) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::flip)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(flip)>::call(self, dims);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Flip>(lazy_self->GetIrValue(), std::vector<int64_t>(dims.begin(), dims.end()));
        if (!node) {
            // Output shape comes from the dedicated shape-inference helper.
            auto shapes = torch::lazy::compute_shape_flip(self, dims);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dims };
                applySymbolicShapesOnLT("aten::flip(Tensor self, int[] dims) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Flip>(lazy_self->GetIrValue(), std::vector<int64_t>(dims.begin(), dims.end()), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::floor: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::floor(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::floor)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(floor)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Floor>(lazy_self->GetIrValue());
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::floor(self_meta);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::floor(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Floor>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::frac: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::frac(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::frac)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(frac)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Frac>(lazy_self->GetIrValue());
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::frac(self_meta);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::frac(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Frac>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::gather: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::gather)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(gather)>::call(self, dim, index, sparse_grad);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, index);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_index = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(index, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Gather>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), sparse_grad);
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto index_meta = to_meta(index);
            auto out_meta = at::meta::gather(self_meta, dim, index_meta, sparse_grad);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim, index, sparse_grad };
                applySymbolicShapesOnLT("aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Gather>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), sparse_grad, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::ge.Scalar: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::ge(const at::Tensor & self, const at::Scalar & other) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::ge)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ge, Scalar)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto node_other = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(other, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<GeScalar>(lazy_self->GetIrValue(), node_other);
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::ge(self_meta, other);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                applySymbolicShapesOnLT("aten::ge.Scalar(Tensor self, Scalar other) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<GeScalar>(lazy_self->GetIrValue(), node_other, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::ge.Tensor: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::ge(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::ge)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ge, Tensor)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<GeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::ge(self_meta, other_meta);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                applySymbolicShapesOnLT("aten::ge.Tensor(Tensor self, Tensor other) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<GeTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::gelu: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::gelu(const at::Tensor & self, c10::string_view approximate) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::gelu)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(gelu)>::call(self, approximate);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Gelu>(lazy_self->GetIrValue(), approximate);
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::gelu(self_meta, approximate);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, approximate };
                applySymbolicShapesOnLT("aten::gelu(Tensor self, *, str approximate='none') -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Gelu>(lazy_self->GetIrValue(), approximate, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::gelu_backward: records an IR node instead
    // of computing eagerly.
    at::Tensor LazyNativeFunctions::gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::gelu_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(gelu_backward)>::call(grad_output, self, approximate);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<GeluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), approximate);
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto grad_output_meta = to_meta(grad_output);
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::gelu_backward(grad_output_meta, self_meta, approximate);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, self, approximate };
                applySymbolicShapesOnLT("aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<GeluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), approximate, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::glu: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::glu(const at::Tensor & self, int64_t dim) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::glu)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(glu)>::call(self, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Glu>(lazy_self->GetIrValue(), dim);
        if (!node) {
            // Derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::glu(self_meta, dim);
            std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim };
                applySymbolicShapesOnLT("aten::glu(Tensor self, int dim=-1) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Glu>(lazy_self->GetIrValue(), dim, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::glu_backward: records an IR node instead
    // of computing eagerly.
    at::Tensor LazyNativeFunctions::glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::glu_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(glu_backward)>::call(grad_output, self, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<GluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), dim);
        if (!node) {
            // Output shape comes from the dedicated shape-inference helper.
            auto shapes = torch::lazy::compute_shape_glu_backward(grad_output, self, dim);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, self, dim };
                applySymbolicShapesOnLT("aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<GluBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), dim, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::glu_jvp: records an IR node instead of
    // computing eagerly.
    at::Tensor LazyNativeFunctions::glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::glu_jvp)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(glu_jvp)>::call(glu, x, dx, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(glu, x, dx);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_glu = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(glu, *device);
        LazyTensorPtr lazy_x = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(x, *device);
        LazyTensorPtr lazy_dx = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(dx, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<GluJvp>(lazy_glu->GetIrValue(), lazy_x->GetIrValue(), lazy_dx->GetIrValue(), dim);
        if (!node) {
            // Output shape comes from the dedicated shape-inference helper.
            auto shapes = torch::lazy::compute_shape_glu_jvp(glu, x, dx, dim);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { glu, x, dx, dim };
                applySymbolicShapesOnLT("aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<GluJvp>(lazy_glu->GetIrValue(), lazy_x->GetIrValue(), lazy_dx->GetIrValue(), dim, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::grid_sampler_2d: records an IR node
    // instead of computing eagerly.
    at::Tensor LazyNativeFunctions::grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::grid_sampler_2d)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(grid_sampler_2d)>::call(input, grid, interpolation_mode, padding_mode, align_corners);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(input, grid);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *device);
        LazyTensorPtr lazy_grid = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grid, *device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<GridSampler2d>(lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners);
        if (!node) {
            // Output shape comes from the dedicated shape-inference helper.
            auto shapes = torch::lazy::compute_shape_grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { input, grid, interpolation_mode, padding_mode, align_corners };
                applySymbolicShapesOnLT("aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<GridSampler2d>(lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor dispatch for aten::grid_sampler_2d_backward: records a single
    // two-output IR node instead of computing eagerly, then unpacks it into a
    // (grad_input, grad_grid) tuple of ATen tensors.
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::grid_sampler_2d_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(grid_sampler_2d_backward)>::call(
                grad_output,
                input,
                grid,
                interpolation_mode,
                padding_mode,
                align_corners,
                output_mask
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(grad_output, input, grid);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
        LazyTensorPtr lazy_grid = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grid, *common_device);
        // Reuse a structurally identical cached IR node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<GridSampler2dBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners, std::vector<bool>(output_mask.begin(), output_mask.end()));
        if (!node) {
            // Two output shapes (grad_input, grad_grid) from the shape-inference helper.
            auto shapes = torch::lazy::compute_shape_grid_sampler_2d_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask };
                const char* schema_str = "aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<GridSampler2dBackward>(lazy_grad_output->GetIrValue(), lazy_input->GetIrValue(), lazy_grid->GetIrValue(), interpolation_mode, padding_mode, align_corners, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap each of the node's two outputs as a lazy tensor on the common device.
        // Reserve up front (size is statically known) to avoid a reallocation,
        // and use size_t + pre-increment per the usual container-index idiom.
        std::vector<LazyTensorPtr> lazy_tensors;
        lazy_tensors.reserve(2);
        for (size_t i = 0; i < 2; ++i) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
        return result;
    }

    
    at::Tensor LazyNativeFunctions::gt(const at::Tensor & self, const at::Scalar & other) {
        // Route to the eager CPU fallback when lazy execution of aten::gt is disabled.
        if (force_eager_fallback(at::aten::gt)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(gt, Scalar)>::call(
                self,
                other
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto other_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(other, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<GtScalar>(self_tensor->GetIrValue(), other_value);
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on data-free meta tensors.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::gt(self_meta, other);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, other };
                const char* schema_str = "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<GtScalar>(self_tensor->GetIrValue(), other_value, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::gt(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager CPU fallback when lazy execution of aten::gt is disabled.
        if (force_eager_fallback(at::aten::gt)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(gt, Tensor)>::call(
                self,
                other
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr other_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<GtTensor>(self_tensor->GetIrValue(), other_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on data-free meta tensors.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::gt(self_meta, other_meta);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, other };
                const char* schema_str = "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<GtTensor>(self_tensor->GetIrValue(), other_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::hardsigmoid(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::hardsigmoid)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(hardsigmoid)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Hardsigmoid>(self_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on a data-free meta tensor.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::hardsigmoid(self_meta);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self };
                const char* schema_str = "aten::hardsigmoid(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Hardsigmoid>(self_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::index_select)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(index_select)>::call(
                self,
                dim,
                index
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, index);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr index_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(index, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<IndexSelect>(self_tensor->GetIrValue(), dim, index_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // This op uses a hand-written shape-inference helper.
            auto out_shapes = torch::lazy::compute_shape_index_select(self, dim, index);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, dim, index };
                const char* schema_str = "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<IndexSelect>(self_tensor->GetIrValue(), dim, index_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::le(const at::Tensor & self, const at::Scalar & other) {
        // Route to the eager CPU fallback when lazy execution of aten::le is disabled.
        if (force_eager_fallback(at::aten::le)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(le, Scalar)>::call(
                self,
                other
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto other_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(other, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<LeScalar>(self_tensor->GetIrValue(), other_value);
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on a data-free meta tensor.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::le(self_meta, other);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, other };
                const char* schema_str = "aten::le.Scalar(Tensor self, Scalar other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<LeScalar>(self_tensor->GetIrValue(), other_value, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::le(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager CPU fallback when lazy execution of aten::le is disabled.
        if (force_eager_fallback(at::aten::le)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(le, Tensor)>::call(
                self,
                other
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr other_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<LeTensor>(self_tensor->GetIrValue(), other_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on data-free meta tensors.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::le(self_meta, other_meta);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, other };
                const char* schema_str = "aten::le.Tensor(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<LeTensor>(self_tensor->GetIrValue(), other_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::leaky_relu)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(leaky_relu)>::call(
                self,
                negative_slope
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto slope_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(negative_slope, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<LeakyRelu>(self_tensor->GetIrValue(), slope_value);
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on a data-free meta tensor.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::leaky_relu(self_meta, negative_slope);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, negative_slope };
                const char* schema_str = "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<LeakyRelu>(self_tensor->GetIrValue(), slope_value, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::leaky_relu_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(leaky_relu_backward)>::call(
                grad_output,
                self,
                negative_slope,
                self_is_result
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr grad_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto slope_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(negative_slope, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<LeakyReluBackward>(grad_tensor->GetIrValue(), self_tensor->GetIrValue(), slope_value, self_is_result);
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on data-free meta tensors.
            auto grad_output_meta = to_meta(grad_output);
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::leaky_relu_backward(grad_output_meta, self_meta, negative_slope, self_is_result);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { grad_output, self, negative_slope, self_is_result };
                const char* schema_str = "aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<LeakyReluBackward>(grad_tensor->GetIrValue(), self_tensor->GetIrValue(), slope_value, self_is_result, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::log(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy execution of aten::log is disabled.
        if (force_eager_fallback(at::aten::log)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Log>(self_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on a data-free meta tensor.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::log(self_meta);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self };
                const char* schema_str = "aten::log(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Log>(self_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::log2(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy execution of aten::log2 is disabled.
        if (force_eager_fallback(at::aten::log2)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log2)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Log2>(self_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on a data-free meta tensor.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::log2(self_meta);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self };
                const char* schema_str = "aten::log2(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Log2>(self_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::log_sigmoid_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log_sigmoid_backward)>::call(
                grad_output,
                self,
                buffer
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self, buffer);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr grad_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr buffer_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(buffer, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<LogSigmoidBackward>(grad_tensor->GetIrValue(), self_tensor->GetIrValue(), buffer_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // This op uses a hand-written shape-inference helper.
            auto out_shapes = torch::lazy::compute_shape_log_sigmoid_backward(grad_output, self, buffer);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { grad_output, self, buffer };
                const char* schema_str = "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<LogSigmoidBackward>(grad_tensor->GetIrValue(), self_tensor->GetIrValue(), buffer_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::log_sigmoid_forward(const at::Tensor & self) {
        // Lazy implementation of aten::log_sigmoid_forward, returning (output, buffer).
        // Falls back to eager execution when lazy tracing of this op is disabled.
        if (force_eager_fallback(at::aten::log_sigmoid_forward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(log_sigmoid_forward)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Reuse a structurally identical cached IR node when available.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<LogSigmoidForward>(lazy_self->GetIrValue());
        if (!node) {
            // Hand-written shape-inference helper; this op has two outputs.
            auto shapes = torch::lazy::compute_shape_log_sigmoid_forward(self);
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<LogSigmoidForward>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        // Wrap each of the node's two outputs in its own lazy tensor.
        std::vector<LazyTensorPtr> lazy_tensors;
        lazy_tensors.reserve(2);  // fixed output arity; avoid a reallocation
        for (int i = 0; i < 2; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        return torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
    }

    
    at::Tensor LazyNativeFunctions::logdet(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy execution of aten::logdet is disabled.
        if (force_eager_fallback(at::aten::logdet)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(logdet)>::call(
                self
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Logdet>(self_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // This op uses a hand-written shape-inference helper.
            auto out_shapes = torch::lazy::compute_shape_logdet(self);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self };
                const char* schema_str = "aten::logdet(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Logdet>(self_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::lt(const at::Tensor & self, const at::Scalar & other) {
        // Route to the eager CPU fallback when lazy execution of aten::lt is disabled.
        if (force_eager_fallback(at::aten::lt)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(lt, Scalar)>::call(
                self,
                other
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto other_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(other, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<LtScalar>(self_tensor->GetIrValue(), other_value);
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on a data-free meta tensor.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::lt(self_meta, other);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, other };
                const char* schema_str = "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<LtScalar>(self_tensor->GetIrValue(), other_value, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::lt(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager CPU fallback when lazy execution of aten::lt is disabled.
        if (force_eager_fallback(at::aten::lt)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(lt, Tensor)>::call(
                self,
                other
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr other_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<LtTensor>(self_tensor->GetIrValue(), other_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // Infer the output shape by running the op on data-free meta tensors.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::lt(self_meta, other_meta);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, other };
                const char* schema_str = "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<LtTensor>(self_tensor->GetIrValue(), other_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::masked_fill)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(masked_fill, Scalar)>::call(
                self,
                mask,
                value
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, mask);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr mask_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mask, *device);
        auto fill_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(value, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<MaskedFillScalar>(self_tensor->GetIrValue(), mask_tensor->GetIrValue(), fill_value);
        if (ir_node == nullptr) {
            // This op uses a hand-written shape-inference helper.
            auto out_shapes = torch::lazy::compute_shape_masked_fill(self, mask, value);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, mask, value };
                const char* schema_str = "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<MaskedFillScalar>(self_tensor->GetIrValue(), mask_tensor->GetIrValue(), fill_value, std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
        // Route to the eager CPU fallback when lazy execution of this op is disabled.
        if (force_eager_fallback(at::aten::masked_fill)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(masked_fill, Tensor)>::call(
                self,
                mask,
                value
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, mask, value);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr mask_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mask, *device);
        LazyTensorPtr value_tensor = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(value, *device);
        // Prefer a structurally identical cached IR node over building a new one.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<MaskedFillTensor>(self_tensor->GetIrValue(), mask_tensor->GetIrValue(), value_tensor->GetIrValue());
        if (ir_node == nullptr) {
            // This op uses a hand-written shape-inference helper.
            auto out_shapes = torch::lazy::compute_shape_masked_fill(self, mask, value);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> fn_inputs = { self, mask, value };
                const char* schema_str = "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor";
                applySymbolicShapesOnLT(schema_str, fn_inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<MaskedFillTensor>(self_tensor->GetIrValue(), mask_tensor->GetIrValue(), value_tensor->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::max(const at::Tensor & self, int64_t dim, bool keepdim) {
        // Lazy implementation of aten::max.dim, returning (values, indices).
        // Falls back to eager execution when lazy tracing of this op is disabled.
        if (force_eager_fallback(at::aten::max)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(max, dim)>::call(
                self,
                dim,
                keepdim
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Reuse a structurally identical cached IR node when available.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaxDim>(lazy_self->GetIrValue(), dim, keepdim);
        if (!node) {
            // Infer both output shapes by running the op on a data-free meta tensor.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::max(self_meta, dim, keepdim);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec()),
                torch::lazy::Shape(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim, keepdim };
                const char* schema_str = "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<MaxDim>(lazy_self->GetIrValue(), dim, keepdim, std::move(shapes));
            CacheNode(node);
        }

        // Wrap each of the node's two outputs in its own lazy tensor.
        std::vector<LazyTensorPtr> lazy_tensors;
        lazy_tensors.reserve(2);  // fixed output arity; avoid a reallocation
        for (int i = 0; i < 2; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        return torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
    }

    
    // Lazy-tensor implementation of aten::max (full reduction to a scalar tensor).
    at::Tensor LazyNativeFunctions::max(const at::Tensor & self) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::max)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(max)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Max>(lazy_self->GetIrValue());
        if (!ir) {
            // Static shape inference via the hand-written shape function.
            auto out_shapes = torch::lazy::compute_shape_max(self);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self };
                const char* schema_str = "aten::max(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<Max>(lazy_self->GetIrValue(), std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::max_pool2d_with_indices.
    // Records a MaxPool2dWithIndices IR node (two outputs) instead of computing eagerly.
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
        
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::max_pool2d_with_indices)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(max_pool2d_with_indices)>::call(
                self,
                kernel_size,
                stride,
                padding,
                dilation,
                ceil_mode
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Try to reuse a structurally identical node from the IR cache.
        // IntArrayRef args are copied into owned vectors for the node's lifetime.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaxPool2dWithIndices>(lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode);
        if (!node) {
            // Infer output dtypes/sizes by running the op on meta tensors (no data).
                    auto self_meta = to_meta(self);
        auto out_meta = at::meta::max_pool2d_with_indices(self_meta, kernel_size, stride, padding, dilation, ceil_mode);
        std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec()),torch::lazy::Shape(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            // Optionally refine the computed shapes with symbolic shape propagation.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, kernel_size, stride, padding, dilation, ceil_mode };
                const char* schema_str = "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<MaxPool2dWithIndices>(lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode, std::move(shapes));
            CacheNode(node);
        }
        
        // Each of the node's two outputs becomes its own lazy tensor.
        std::vector<LazyTensorPtr> lazy_tensors;
        for (int i = 0; i < 2; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
        return result;
    }

    
    // Lazy-tensor implementation of aten::max_pool2d_with_indices_backward.
    // Records a MaxPool2dWithIndicesBackward IR node instead of computing eagerly.
    at::Tensor LazyNativeFunctions::max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
        
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::max_pool2d_with_indices_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(max_pool2d_with_indices_backward)>::call(
                grad_output,
                self,
                kernel_size,
                stride,
                padding,
                dilation,
                ceil_mode,
                indices
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor operands must resolve to one backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_output, self, indices);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_indices = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(indices, *common_device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<MaxPool2dWithIndicesBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode, lazy_indices->GetIrValue());
        if (!node) {
            // Infer the output dtype/size by running the op on meta tensors (no data).
                    auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto out_meta = at::meta::max_pool2d_with_indices_backward(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, indices_meta);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices };
                const char* schema_str = "aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<MaxPool2dWithIndicesBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), std::vector<int64_t>(kernel_size.begin(), kernel_size.end()), std::vector<int64_t>(stride.begin(), stride.end()), std::vector<int64_t>(padding.begin(), padding.end()), std::vector<int64_t>(dilation.begin(), dilation.end()), ceil_mode, lazy_indices->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the single IR output as an at::Tensor backed by a lazy tensor.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy-tensor implementation of aten::maximum (element-wise max of two tensors).
    at::Tensor LazyNativeFunctions::maximum(const at::Tensor & self, const at::Tensor & other) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::maximum)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(maximum)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Maximum>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (!ir) {
            // Infer the output dtype/size by running the op on meta tensors (no data).
            auto meta_out = at::meta::maximum(to_meta(self), to_meta(other));
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self, other };
                const char* schema_str = "aten::maximum(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<Maximum>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::mean (full reduction, optional dtype).
    at::Tensor LazyNativeFunctions::mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::mean)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(mean)>::call(self, dtype);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Mean>(lazy_self->GetIrValue(), dtype);
        if (!ir) {
            // Static shape inference via the hand-written shape function.
            auto out_shapes = torch::lazy::compute_shape_mean(self, dtype);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self, dtype };
                const char* schema_str = "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<Mean>(lazy_self->GetIrValue(), dtype, std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::mean.dim (reduction over given dims).
    at::Tensor LazyNativeFunctions::mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::mean)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(mean, dim)>::call(self, dim, keepdim, dtype);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Try to reuse a structurally identical node from the IR cache.
        // The optional dim list is converted to an owned optional vector for the node.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<MeanDim>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype);
        if (!ir) {
            // Infer the output dtype/size by running the op on meta tensors (no data).
            auto meta_out = at::meta::mean(to_meta(self), dim, keepdim, dtype);
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self, dim, keepdim, dtype };
                const char* schema_str = "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<MeanDim>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype, std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::min (full reduction to a scalar tensor).
    at::Tensor LazyNativeFunctions::min(const at::Tensor & self) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::min)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(min)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Min>(lazy_self->GetIrValue());
        if (!ir) {
            // Static shape inference via the hand-written shape function.
            auto out_shapes = torch::lazy::compute_shape_min(self);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self };
                const char* schema_str = "aten::min(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<Min>(lazy_self->GetIrValue(), std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::minimum (element-wise min of two tensors).
    at::Tensor LazyNativeFunctions::minimum(const at::Tensor & self, const at::Tensor & other) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::minimum)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(minimum)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Minimum>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (!ir) {
            // Infer the output dtype/size by running the op on meta tensors (no data).
            auto meta_out = at::meta::minimum(to_meta(self), to_meta(other));
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self, other };
                const char* schema_str = "aten::minimum(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<Minimum>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::mm (matrix multiply).
    at::Tensor LazyNativeFunctions::mm(const at::Tensor & self, const at::Tensor & mat2) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::mm)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(mm)>::call(self, mat2);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, mat2);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_mat2 = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mat2, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Mm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue());
        if (!ir) {
            // Infer the output dtype/size by running the op on meta tensors (no data).
            auto meta_out = at::meta::mm(to_meta(self), to_meta(mat2));
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self, mat2 };
                const char* schema_str = "aten::mm(Tensor self, Tensor mat2) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<Mm>(lazy_self->GetIrValue(), lazy_mat2->GetIrValue(), std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::mul.Tensor (element-wise multiply).
    at::Tensor LazyNativeFunctions::mul(const at::Tensor & self, const at::Tensor & other) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::mul)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(mul, Tensor)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<MulTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (!ir) {
            // Infer the output dtype/size by running the op on meta tensors (no data).
            auto meta_out = at::meta::mul(to_meta(self), to_meta(other));
            std::vector<torch::lazy::Shape> out_shapes{
                torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self, other };
                const char* schema_str = "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<MulTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::mv (matrix-vector multiply).
    at::Tensor LazyNativeFunctions::mv(const at::Tensor & self, const at::Tensor & vec) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::mv)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(mv)>::call(self, vec);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, vec);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_vec = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(vec, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Mv>(lazy_self->GetIrValue(), lazy_vec->GetIrValue());
        if (!ir) {
            // Static shape inference via the hand-written shape function.
            auto out_shapes = torch::lazy::compute_shape_mv(self, vec);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { self, vec };
                const char* schema_str = "aten::mv(Tensor self, Tensor vec) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<Mv>(lazy_self->GetIrValue(), lazy_vec->GetIrValue(), std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor implementation of aten::native_batch_norm.
    // Records a NativeBatchNorm IR node (three outputs per the schema) instead
    // of computing eagerly.
    ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_batch_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
        
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::native_batch_norm)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_batch_norm)>::call(
                input,
                weight,
                bias,
                running_mean,
                running_var,
                training,
                momentum,
                eps
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor operands (including the optionals) must resolve to one backend device.
        auto common_device = torch::lazy::GetBackendDevice(input, weight, bias, running_mean, running_var);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
        // Optional operands: an absent optional becomes a default at::Tensor, for
        // which TryGetLtcTensor yields a null LazyTensorPtr.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        LazyTensorPtr lazy_bias = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
        LazyTensorPtr lazy_running_mean = torch::lazy::TryGetLtcTensor(running_mean.value_or(at::Tensor()));
        LazyTensorPtr lazy_running_var = torch::lazy::TryGetLtcTensor(running_var.value_or(at::Tensor()));
        // Try to reuse a structurally identical node from the IR cache; null lazy
        // tensors are passed through as nullopt IR operands.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeBatchNorm>(lazy_input->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_bias ? std::make_optional(lazy_bias->GetIrValue()) : ::std::nullopt, lazy_running_mean ? std::make_optional(lazy_running_mean->GetIrValue()) : ::std::nullopt, lazy_running_var ? std::make_optional(lazy_running_var->GetIrValue()) : ::std::nullopt, training, momentum, eps);
        if (!node) {
            
            // Static shape inference via the hand-written shape function (three outputs).
            auto shapes = torch::lazy::compute_shape_native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps);
            TORCH_INTERNAL_ASSERT(shapes.size() == 3);
            // Optionally refine the computed shapes with symbolic shape propagation.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { input, weight, bias, running_mean, running_var, training, momentum, eps };
                const char* schema_str = "aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NativeBatchNorm>(lazy_input->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_bias ? std::make_optional(lazy_bias->GetIrValue()) : ::std::nullopt, lazy_running_mean ? std::make_optional(lazy_running_mean->GetIrValue()) : ::std::nullopt, lazy_running_var ? std::make_optional(lazy_running_var->GetIrValue()) : ::std::nullopt, training, momentum, eps, std::move(shapes));
            CacheNode(node);
        }
        
        // Each of the node's three outputs becomes its own lazy tensor.
        std::vector<LazyTensorPtr> lazy_tensors;
        for (int i = 0; i < 3; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
        return result;
    }

    
    // Lazy-tensor implementation of aten::native_batch_norm_backward.
    // Records a NativeBatchNormBackward IR node (three outputs per the schema)
    // instead of computing eagerly.
    ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
        
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::native_batch_norm_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_batch_norm_backward)>::call(
                grad_out,
                input,
                weight,
                running_mean,
                running_var,
                save_mean,
                save_invstd,
                train,
                eps,
                output_mask
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor operands (including the optionals) must resolve to one backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_out = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_out, *common_device);
        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
        // Optional operands: an absent optional becomes a default at::Tensor, for
        // which TryGetLtcTensor yields a null LazyTensorPtr.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        LazyTensorPtr lazy_running_mean = torch::lazy::TryGetLtcTensor(running_mean.value_or(at::Tensor()));
        LazyTensorPtr lazy_running_var = torch::lazy::TryGetLtcTensor(running_var.value_or(at::Tensor()));
        LazyTensorPtr lazy_save_mean = torch::lazy::TryGetLtcTensor(save_mean.value_or(at::Tensor()));
        LazyTensorPtr lazy_save_invstd = torch::lazy::TryGetLtcTensor(save_invstd.value_or(at::Tensor()));
        // Try to reuse a structurally identical node from the IR cache; the
        // output_mask array is copied into a vector<bool> for the node.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeBatchNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_running_mean ? std::make_optional(lazy_running_mean->GetIrValue()) : ::std::nullopt, lazy_running_var ? std::make_optional(lazy_running_var->GetIrValue()) : ::std::nullopt, lazy_save_mean ? std::make_optional(lazy_save_mean->GetIrValue()) : ::std::nullopt, lazy_save_invstd ? std::make_optional(lazy_save_invstd->GetIrValue()) : ::std::nullopt, train, eps, std::vector<bool>(output_mask.begin(), output_mask.end()));
        if (!node) {
            
            // Static shape inference via the hand-written shape function (three outputs).
            auto shapes = torch::lazy::compute_shape_native_batch_norm_backward(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
            TORCH_INTERNAL_ASSERT(shapes.size() == 3);
            // Optionally refine the computed shapes with symbolic shape propagation.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask };
                const char* schema_str = "aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NativeBatchNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_running_mean ? std::make_optional(lazy_running_mean->GetIrValue()) : ::std::nullopt, lazy_running_var ? std::make_optional(lazy_running_var->GetIrValue()) : ::std::nullopt, lazy_save_mean ? std::make_optional(lazy_save_mean->GetIrValue()) : ::std::nullopt, lazy_save_invstd ? std::make_optional(lazy_save_invstd->GetIrValue()) : ::std::nullopt, train, eps, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
            CacheNode(node);
        }
        
        // Each of the node's three outputs becomes its own lazy tensor.
        std::vector<LazyTensorPtr> lazy_tensors;
        for (int i = 0; i < 3; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
        return result;
    }

    
    // Lazy-tensor implementation of aten::native_dropout: records a NativeDropout
    // IR node (two outputs per the schema) instead of computing eagerly.
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::native_dropout(const at::Tensor & input, double p, ::std::optional<bool> train) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::native_dropout)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_dropout)>::call(input, p, train);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(input);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<NativeDropout>(lazy_input->GetIrValue(), p, train);
        if (!ir) {
            // Static shape inference via the hand-written shape function.
            auto out_shapes = torch::lazy::compute_shape_native_dropout(input, p, train);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 2);
            // Optionally refine the computed shapes with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { input, p, train };
                const char* schema_str = "aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<NativeDropout>(lazy_input->GetIrValue(), p, train, std::move(out_shapes));
            CacheNode(ir);
        }

        // Each of the node's two outputs becomes its own lazy tensor.
        std::vector<LazyTensorPtr> outputs;
        for (int i = 0; i < 2; i++) {
            outputs.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(ir, i), *device));
        }
        return torch::lazy::TupleAtenFromLtcTensors<2>(outputs);
    }

    
    // Lazy-tensor implementation of aten::native_dropout_backward.
    at::Tensor LazyNativeFunctions::native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
        // Escape hatch: run the eager kernel when lazy tracing is force-disabled for this op.
        if (force_eager_fallback(at::aten::native_dropout_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_dropout_backward)>::call(grad_output, mask, scale);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, mask);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr lazy_mask = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mask, *device);
        // Try to reuse a structurally identical node from the IR cache.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<NativeDropoutBackward>(lazy_grad_output->GetIrValue(), lazy_mask->GetIrValue(), scale);
        if (!ir) {
            // Static shape inference via the hand-written shape function.
            auto out_shapes = torch::lazy::compute_shape_native_dropout_backward(grad_output, mask, scale);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            // Optionally refine the computed shape with symbolic shape propagation.
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> schema_inputs = { grad_output, mask, scale };
                const char* schema_str = "aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor";
                applySymbolicShapesOnLT(schema_str, schema_inputs, out_shapes);
            }
            ir = torch::lazy::MakeNode<NativeDropoutBackward>(lazy_grad_output->GetIrValue(), lazy_mask->GetIrValue(), scale, std::move(out_shapes));
            CacheNode(ir);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps) {
        // Lazy-tensor kernel for aten::native_layer_norm: record an IR node and
        // return its three outputs as lazy tensors.
        if (force_eager_fallback(at::aten::native_layer_norm)) {
            // Eager fallback path when lazy execution is disabled for this op.
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_layer_norm)>::call(
                input,
                c10::fromIntArrayRefSlow(normalized_shape),
                weight,
                bias,
                eps
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All (possibly optional) tensor inputs must agree on one backend device.
        auto common_device = torch::lazy::GetBackendDevice(input, weight, bias);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
        // Absent optionals become undefined tensors; TryGetLtcTensor then yields a
        // null pointer, which is mapped to a nullopt IR operand below.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        LazyTensorPtr lazy_bias = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
        // Try to reuse a structurally identical cached IR node first.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeLayerNorm>(lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_bias ? std::make_optional(lazy_bias->GetIrValue()) : ::std::nullopt, eps);
        if (!node) {
            
            // Cache miss: infer the three output shapes, then build and cache a new node.
            auto shapes = torch::lazy::compute_shape_native_layer_norm(input, normalized_shape, weight, bias, eps);
            TORCH_INTERNAL_ASSERT(shapes.size() == 3);
            if(torch::lazy::symbolicShapeEnabled()){
                // Optionally refine the inferred shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { input, normalized_shape, weight, bias, eps };
                const char* schema_str = "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NativeLayerNorm>(lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_bias ? std::make_optional(lazy_bias->GetIrValue()) : ::std::nullopt, eps, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap each of the node's three outputs as a lazy tensor and pack the tuple.
        std::vector<LazyTensorPtr> lazy_tensors;
        for (int i = 0; i < 3; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
        return result;
    }

    
    ::std::tuple<at::Tensor,at::Tensor,at::Tensor> LazyNativeFunctions::native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
        // Lazy-tensor kernel for aten::native_layer_norm_backward: record an IR
        // node and return its three gradient outputs as lazy tensors.
        if (force_eager_fallback(at::aten::native_layer_norm_backward)) {
            // Eager fallback path when lazy execution is disabled for this op.
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(native_layer_norm_backward)>::call(
                grad_out,
                input,
                c10::fromIntArrayRefSlow(normalized_shape),
                mean,
                rstd,
                weight,
                bias,
                output_mask
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All (possibly optional) tensor inputs must agree on one backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_out, input, mean, rstd, weight, bias);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_out = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_out, *common_device);
        LazyTensorPtr lazy_input = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(input, *common_device);
        LazyTensorPtr lazy_mean = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(mean, *common_device);
        LazyTensorPtr lazy_rstd = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(rstd, *common_device);
        // Absent optionals become undefined tensors; TryGetLtcTensor then yields a
        // null pointer, which is mapped to a nullopt IR operand below.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        LazyTensorPtr lazy_bias = torch::lazy::TryGetLtcTensor(bias.value_or(at::Tensor()));
        // Try to reuse a structurally identical cached IR node first.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NativeLayerNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_mean->GetIrValue(), lazy_rstd->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_bias ? std::make_optional(lazy_bias->GetIrValue()) : ::std::nullopt, std::vector<bool>(output_mask.begin(), output_mask.end()));
        if (!node) {
            
            // Cache miss: infer the three output shapes, then build and cache a new node.
            auto shapes = torch::lazy::compute_shape_native_layer_norm_backward(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
            TORCH_INTERNAL_ASSERT(shapes.size() == 3);
            if(torch::lazy::symbolicShapeEnabled()){
                // Optionally refine the inferred shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask };
                const char* schema_str = "aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NativeLayerNormBackward>(lazy_grad_out->GetIrValue(), lazy_input->GetIrValue(), std::vector<int64_t>(normalized_shape.begin(), normalized_shape.end()), lazy_mean->GetIrValue(), lazy_rstd->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, lazy_bias ? std::make_optional(lazy_bias->GetIrValue()) : ::std::nullopt, std::vector<bool>(output_mask.begin(), output_mask.end()), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap each of the node's three outputs as a lazy tensor and pack the tuple.
        std::vector<LazyTensorPtr> lazy_tensors;
        for (int i = 0; i < 3; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<3>(lazy_tensors);
        return result;
    }

    
    at::Tensor LazyNativeFunctions::ne(const at::Tensor & self, const at::Scalar & other) {
        // Lazy-tensor kernel for aten::ne.Scalar: trace an IR node rather than
        // computing the comparison eagerly.
        if (force_eager_fallback(at::aten::ne)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ne, Scalar)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower the scalar operand to an IR value on the same device.
        auto other_ir = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(other, *device);
        // Prefer a cached IR node; build (and cache) a new one only on a miss.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<NeScalar>(self_ltc->GetIrValue(), other_ir);
        if (!ir_node) {
            // Cache miss: run the op on meta tensors to derive the output shape.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::ne(self_meta, other);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, other };
                const char* schema_str = "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<NeScalar>(self_ltc->GetIrValue(), other_ir, std::move(shapes));
            CacheNode(ir_node);
        }

        // Wrap the node's single output as an ATen-facing lazy tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::ne(const at::Tensor & self, const at::Tensor & other) {
        // Lazy-tensor kernel for aten::ne.Tensor: trace an IR node rather than
        // computing the comparison eagerly.
        if (force_eager_fallback(at::aten::ne)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(ne, Tensor)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // Both tensor inputs must live on the same backend device.
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr other_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Prefer a cached IR node; build (and cache) a new one only on a miss.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<NeTensor>(self_ltc->GetIrValue(), other_ltc->GetIrValue());
        if (!ir_node) {
            // Cache miss: run the op on meta tensors to derive the output shape.
            auto self_meta = to_meta(self);
            auto other_meta = to_meta(other);
            auto out_meta = at::meta::ne(self_meta, other_meta);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, other };
                const char* schema_str = "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<NeTensor>(self_ltc->GetIrValue(), other_ltc->GetIrValue(), std::move(shapes));
            CacheNode(ir_node);
        }

        // Wrap the node's single output as an ATen-facing lazy tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::neg(const at::Tensor & self) {
        // Lazy-tensor kernel for aten::neg: trace an IR node rather than
        // computing the negation eagerly.
        if (force_eager_fallback(at::aten::neg)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(neg)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a cached IR node; build (and cache) a new one only on a miss.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Neg>(self_ltc->GetIrValue());
        if (!ir_node) {
            // Cache miss: run the op on meta tensors to derive the output shape.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::neg(self_meta);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::neg(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<Neg>(self_ltc->GetIrValue(), std::move(shapes));
            CacheNode(ir_node);
        }

        // Wrap the node's single output as an ATen-facing lazy tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
        // Lazy-tensor kernel for aten::nll_loss2d_backward: record an IR node
        // and return its single gradient output as a lazy tensor.
        if (force_eager_fallback(at::aten::nll_loss2d_backward)) {
            // Eager fallback path when lazy execution is disabled for this op.
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss2d_backward)>::call(
                grad_output,
                self,
                target,
                weight,
                reduction,
                ignore_index,
                total_weight
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All (possibly optional) tensor inputs must agree on one backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_output, self, target, weight, total_weight);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
        // An absent weight becomes an undefined tensor; TryGetLtcTensor then yields
        // a null pointer, which is mapped to a nullopt IR operand below.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        LazyTensorPtr lazy_total_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(total_weight, *common_device);
        // Try to reuse a structurally identical cached IR node first.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLoss2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue());
        if (!node) {
            
            // Cache miss: infer the output shape, then build and cache a new node.
            auto shapes = torch::lazy::compute_shape_nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if(torch::lazy::symbolicShapeEnabled()){
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { grad_output, self, target, weight, reduction, ignore_index, total_weight };
                const char* schema_str = "aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NllLoss2dBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the node's single output as an ATen-facing lazy tensor.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
        // Lazy-tensor kernel for aten::nll_loss2d_forward: record an IR node and
        // return its two outputs (output, total_weight) as lazy tensors.
        if (force_eager_fallback(at::aten::nll_loss2d_forward)) {
            // Eager fallback path when lazy execution is disabled for this op.
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss2d_forward)>::call(
                self,
                target,
                weight,
                reduction,
                ignore_index
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All (possibly optional) tensor inputs must agree on one backend device.
        auto common_device = torch::lazy::GetBackendDevice(self, target, weight);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
        // An absent weight becomes an undefined tensor; TryGetLtcTensor then yields
        // a null pointer, which is mapped to a nullopt IR operand below.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        // Try to reuse a structurally identical cached IR node first.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLoss2dForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index);
        if (!node) {
            
            // Cache miss: infer the two output shapes, then build and cache a new node.
            auto shapes = torch::lazy::compute_shape_nll_loss2d_forward(self, target, weight, reduction, ignore_index);
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            if(torch::lazy::symbolicShapeEnabled()){
                // Optionally refine the inferred shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, target, weight, reduction, ignore_index };
                const char* schema_str = "aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NllLoss2dForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap each of the node's two outputs as a lazy tensor and pack the tuple.
        std::vector<LazyTensorPtr> lazy_tensors;
        for (int i = 0; i < 2; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
        return result;
    }

    
    at::Tensor LazyNativeFunctions::nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
        // Lazy-tensor kernel for aten::nll_loss_backward: record an IR node and
        // return its single gradient output as a lazy tensor.
        if (force_eager_fallback(at::aten::nll_loss_backward)) {
            // Eager fallback path when lazy execution is disabled for this op.
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss_backward)>::call(
                grad_output,
                self,
                target,
                weight,
                reduction,
                ignore_index,
                total_weight
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All (possibly optional) tensor inputs must agree on one backend device.
        auto common_device = torch::lazy::GetBackendDevice(grad_output, self, target, weight, total_weight);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
        // An absent weight becomes an undefined tensor; TryGetLtcTensor then yields
        // a null pointer, which is mapped to a nullopt IR operand below.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        LazyTensorPtr lazy_total_weight = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(total_weight, *common_device);
        // Try to reuse a structurally identical cached IR node first.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLossBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue());
        if (!node) {
            // Cache miss: run the op on meta tensors to derive the output shape,
            // then build and cache a new node.
                    auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto total_weight_meta = to_meta(total_weight);
        auto out_meta = at::meta::nll_loss_backward(grad_output_meta, self_meta, target_meta, weight_meta, reduction, ignore_index, total_weight_meta);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if(torch::lazy::symbolicShapeEnabled()){
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { grad_output, self, target, weight, reduction, ignore_index, total_weight };
                const char* schema_str = "aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NllLossBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index, lazy_total_weight->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the node's single output as an ATen-facing lazy tensor.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
        // Lazy-tensor kernel for aten::nll_loss_forward: record an IR node and
        // return its two outputs (output, total_weight) as lazy tensors.
        if (force_eager_fallback(at::aten::nll_loss_forward)) {
            // Eager fallback path when lazy execution is disabled for this op.
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nll_loss_forward)>::call(
                self,
                target,
                weight,
                reduction,
                ignore_index
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All (possibly optional) tensor inputs must agree on one backend device.
        auto common_device = torch::lazy::GetBackendDevice(self, target, weight);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_target = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *common_device);
        // An absent weight becomes an undefined tensor; TryGetLtcTensor then yields
        // a null pointer, which is mapped to a nullopt IR operand below.
        LazyTensorPtr lazy_weight = torch::lazy::TryGetLtcTensor(weight.value_or(at::Tensor()));
        // Try to reuse a structurally identical cached IR node first.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<NllLossForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index);
        if (!node) {
            // Cache miss: run the op on meta tensors to derive both output shapes,
            // then build and cache a new node.
                    auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto out_meta = at::meta::nll_loss_forward(self_meta, target_meta, weight_meta, reduction, ignore_index);
        std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec()),torch::lazy::Shape(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            if(torch::lazy::symbolicShapeEnabled()){
                // Optionally refine the inferred shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, target, weight, reduction, ignore_index };
                const char* schema_str = "aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<NllLossForward>(lazy_self->GetIrValue(), lazy_target->GetIrValue(), lazy_weight ? std::make_optional(lazy_weight->GetIrValue()) : ::std::nullopt, reduction, ignore_index, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap each of the node's two outputs as a lazy tensor and pack the tuple.
        std::vector<LazyTensorPtr> lazy_tensors;
        for (int i = 0; i < 2; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        auto result = torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
        return result;
    }

    
    at::Tensor LazyNativeFunctions::nonzero(const at::Tensor & self) {
        // Lazy-tensor kernel for aten::nonzero: trace an IR node rather than
        // computing eagerly.
        if (force_eager_fallback(at::aten::nonzero)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(nonzero)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a cached IR node; build (and cache) a new one only on a miss.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Nonzero>(self_ltc->GetIrValue());
        if (!ir_node) {
            // Infer the single output shape for the new node.
            auto shapes = torch::lazy::compute_shape_nonzero(self);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::nonzero(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<Nonzero>(self_ltc->GetIrValue(), std::move(shapes));
            CacheNode(ir_node);
        }

        // Wrap the node's single output as an ATen-facing lazy tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
        // Lazy-tensor kernel for aten::norm.ScalarOpt_dim: trace an IR node
        // rather than computing the reduction eagerly.
        if (force_eager_fallback(at::aten::norm)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(norm, ScalarOpt_dim)>::call(self, p, dim, keepdim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower the optional scalar p to an optional IR value on the same device.
        auto p_ir = p ? std::make_optional(torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(*p, *device)) : ::std::nullopt;
        // Materialize the reduction dims once; the same list feeds both node calls.
        std::vector<int64_t> dim_vec(dim.begin(), dim.end());
        // Prefer a cached IR node; build (and cache) a new one only on a miss.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<NormScalaroptDim>(self_ltc->GetIrValue(), p_ir, dim_vec, keepdim);
        if (!ir_node) {
            // Cache miss: run the op on meta tensors to derive the output shape.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::norm(self_meta, p, dim, keepdim);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, p, dim, keepdim };
                const char* schema_str = "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<NormScalaroptDim>(self_ltc->GetIrValue(), p_ir, dim_vec, keepdim, std::move(shapes));
            CacheNode(ir_node);
        }

        // Wrap the node's single output as an ATen-facing lazy tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::normal_functional(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
        // Lazy-tensor kernel for aten::normal_functional: trace an IR node
        // rather than sampling eagerly.
        if (force_eager_fallback(at::aten::normal_functional)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(normal_functional)>::call(self, mean, std, generator);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a cached IR node; build (and cache) a new one only on a miss.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<NormalFunctional>(self_ltc->GetIrValue(), mean, std, generator);
        if (!ir_node) {
            // Infer the single output shape for the new node.
            auto shapes = torch::lazy::compute_shape_normal_functional(self, mean, std, generator);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, mean, std, generator };
                const char* schema_str = "aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<NormalFunctional>(self_ltc->GetIrValue(), mean, std, generator, std::move(shapes));
            CacheNode(ir_node);
        }

        // Wrap the node's single output as an ATen-facing lazy tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::permute_copy(const at::Tensor & self, at::IntArrayRef dims) {
        // Lazy-tensor kernel for aten::permute_copy: trace an IR node rather
        // than computing the permuted copy eagerly.
        if (force_eager_fallback(at::aten::permute_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(permute_copy)>::call(self, dims);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_ltc = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Materialize the permutation once; the same list feeds both node calls.
        std::vector<int64_t> dims_vec(dims.begin(), dims.end());
        // Prefer a cached IR node; build (and cache) a new one only on a miss.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<PermuteCopy>(self_ltc->GetIrValue(), dims_vec);
        if (!ir_node) {
            // Cache miss: run the op on meta tensors to derive the output shape.
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::permute_copy(self_meta, dims);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the inferred shape with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, dims };
                const char* schema_str = "aten::permute_copy(Tensor self, int[] dims) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            ir_node = torch::lazy::MakeNode<PermuteCopy>(self_ltc->GetIrValue(), dims_vec, std::move(shapes));
            CacheNode(ir_node);
        }

        // Wrap the node's single output as an ATen-facing lazy tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    // Lazy implementation of aten::pow.Tensor_Tensor: builds a PowTensorTensor
    // IR node over both operands; shape inference via at::meta::pow.
    at::Tensor LazyNativeFunctions::pow(const at::Tensor & self, const at::Tensor & exponent) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::pow)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(pow, Tensor_Tensor)>::call(self, exponent);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, exponent);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_exponent = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(exponent, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<PowTensorTensor>(lazy_self->GetIrValue(), lazy_exponent->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on meta tensors.
            auto out_meta = at::meta::pow(to_meta(self), to_meta(exponent));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, exponent };
                applySymbolicShapesOnLT("aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<PowTensorTensor>(lazy_self->GetIrValue(), lazy_exponent->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::pow.Tensor_Scalar: the scalar exponent is
    // materialized as an IR value via the graph executor's scalar cache.
    at::Tensor LazyNativeFunctions::pow(const at::Tensor & self, const at::Scalar & exponent) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::pow)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(pow, Tensor_Scalar)>::call(self, exponent);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        auto node_exponent =
            torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(exponent, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<PowTensorScalar>(lazy_self->GetIrValue(), node_exponent);
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::meta::pow(to_meta(self), exponent);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, exponent };
                applySymbolicShapesOnLT("aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<PowTensorScalar>(lazy_self->GetIrValue(), node_exponent, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::random.from: shape inference comes from the
    // hand-written compute_shape_random helper (no meta kernel for random ops).
    at::Tensor LazyNativeFunctions::random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::random)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(random, from)>::call(self, from, to, generator);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<RandomFrom>(lazy_self->GetIrValue(), from, to, generator);
        if (node == nullptr) {
            auto shapes = torch::lazy::compute_shape_random(self, from, to, generator);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, from, to, generator };
                applySymbolicShapesOnLT("aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<RandomFrom>(lazy_self->GetIrValue(), from, to, generator, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::random.to: shape inference comes from the
    // hand-written compute_shape_random helper.
    at::Tensor LazyNativeFunctions::random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::random)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(random, to)>::call(self, to, generator);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<RandomTo>(lazy_self->GetIrValue(), to, generator);
        if (node == nullptr) {
            auto shapes = torch::lazy::compute_shape_random(self, to, generator);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, to, generator };
                applySymbolicShapesOnLT("aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<RandomTo>(lazy_self->GetIrValue(), to, generator, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::random (no bounds): shape inference comes
    // from the hand-written compute_shape_random helper.
    at::Tensor LazyNativeFunctions::random(const at::Tensor & self, ::std::optional<at::Generator> generator) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::random)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(random)>::call(self, generator);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Random>(lazy_self->GetIrValue(), generator);
        if (node == nullptr) {
            auto shapes = torch::lazy::compute_shape_random(self, generator);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, generator };
                applySymbolicShapesOnLT("aten::random(Tensor self, *, Generator? generator=None) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Random>(lazy_self->GetIrValue(), generator, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::reciprocal; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::reciprocal(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::reciprocal)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(reciprocal)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Reciprocal>(lazy_self->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::meta::reciprocal(to_meta(self));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::reciprocal(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Reciprocal>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::relu; shape inference via compute_shape_relu.
    at::Tensor LazyNativeFunctions::relu(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::relu)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(relu)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Relu>(lazy_self->GetIrValue());
        if (node == nullptr) {
            auto shapes = torch::lazy::compute_shape_relu(self);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::relu(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Relu>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::remainder.Tensor; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::remainder(const at::Tensor & self, const at::Tensor & other) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::remainder)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(remainder, Tensor)>::call(self, other);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_other = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<RemainderTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on meta tensors.
            auto out_meta = at::meta::remainder(to_meta(self), to_meta(other));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, other };
                applySymbolicShapesOnLT("aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<RemainderTensor>(lazy_self->GetIrValue(), lazy_other->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::repeat; shape inference via compute_shape_repeat.
    at::Tensor LazyNativeFunctions::repeat(const at::Tensor & self, at::IntArrayRef repeats) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        // The fallback expects SymInt repeats, hence the slow conversion.
        if (force_eager_fallback(at::aten::repeat)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(repeat)>::call(self, c10::fromIntArrayRefSlow(repeats));
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        std::vector<int64_t> repeats_vec(repeats.begin(), repeats.end());
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Repeat>(lazy_self->GetIrValue(), repeats_vec);
        if (node == nullptr) {
            auto shapes = torch::lazy::compute_shape_repeat(self, repeats);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, repeats };
                applySymbolicShapesOnLT("aten::repeat(Tensor self, SymInt[] repeats) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Repeat>(lazy_self->GetIrValue(), std::move(repeats_vec), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::rsqrt; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::rsqrt(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::rsqrt)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(rsqrt)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Rsqrt>(lazy_self->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::meta::rsqrt(to_meta(self));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::rsqrt(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Rsqrt>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::scatter_add; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::scatter_add)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(scatter_add)>::call(self, dim, index, src);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, index, src);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_index = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(index, *device);
        LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ScatterAdd>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), lazy_src->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on meta tensors.
            auto out_meta = at::meta::scatter_add(to_meta(self), dim, to_meta(index), to_meta(src));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim, index, src };
                applySymbolicShapesOnLT("aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<ScatterAdd>(lazy_self->GetIrValue(), dim, lazy_index->GetIrValue(), lazy_src->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::select_copy.int; shape inference via the
    // CompositeExplicitAutogradNonFunctional meta path.
    at::Tensor LazyNativeFunctions::select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::select_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(select_copy, int)>::call(self, dim, index);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<SelectCopyInt>(lazy_self->GetIrValue(), dim, index);
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::compositeexplicitautogradnonfunctional::select_copy(to_meta(self), dim, index);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim, index };
                applySymbolicShapesOnLT("aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<SelectCopyInt>(lazy_self->GetIrValue(), dim, index, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::select_scatter; shape inference via the
    // hand-written compute_shape_select_scatter helper.
    at::Tensor LazyNativeFunctions::select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::select_scatter)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(select_scatter)>::call(self, src, dim, index);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, src);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<SelectScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, index);
        if (node == nullptr) {
            auto shapes = torch::lazy::compute_shape_select_scatter(self, src, dim, index);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, src, dim, index };
                applySymbolicShapesOnLT("aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<SelectScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, index, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::sgn; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::sgn(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::sgn)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sgn)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Sgn>(lazy_self->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::meta::sgn(to_meta(self));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::sgn(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Sgn>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::sigmoid; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::sigmoid(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::sigmoid)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sigmoid)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Sigmoid>(lazy_self->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::meta::sigmoid(to_meta(self));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::sigmoid(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Sigmoid>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::sigmoid_backward; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        // (This op has no at::aten symbol constant, so it is looked up by name.)
        if (force_eager_fallback(c10::Symbol::fromQualString("aten::sigmoid_backward"))) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sigmoid_backward)>::call(grad_output, output);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, output);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<SigmoidBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on meta tensors.
            auto out_meta = at::meta::sigmoid_backward(to_meta(grad_output), to_meta(output));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, output };
                applySymbolicShapesOnLT("aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<SigmoidBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::silu; shape inference via at::meta.
    at::Tensor LazyNativeFunctions::silu(const at::Tensor & self) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::silu)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(silu)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse a structurally identical cached node when one exists.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Silu>(lazy_self->GetIrValue());
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::meta::silu(to_meta(self));
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::silu(Tensor self) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<Silu>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy implementation of aten::slice_copy.Tensor with SymInt bounds; the
    // optional start/end are lowered to concrete values once and reused for
    // both the cache lookup and node construction.
    at::Tensor LazyNativeFunctions::slice_copy_symint(const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
        // Route to the eager fallback when lazy execution is disabled for this op.
        if (force_eager_fallback(at::aten::slice_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(slice_copy, Tensor)>::call(self, dim, start, end, step);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, start, end, step);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower the optional SymInt bounds up front so the same values feed
        // ReuseNode and MakeNode.
        auto start_val = start ? std::make_optional(GetSymIntValue(*start)) : ::std::nullopt;
        auto end_val = end ? std::make_optional(GetSymIntValue(*end)) : ::std::nullopt;
        auto step_val = GetSymIntValue(step);
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<SliceCopyTensor>(lazy_self->GetIrValue(), dim, start_val, end_val, step_val);
        if (node == nullptr) {
            // Infer the output shape by running the op on a meta tensor.
            auto out_meta = at::compositeexplicitautogradnonfunctional::slice_copy_symint(to_meta(self), dim, start, end, step);
            std::vector<torch::lazy::Shape> shapes{
                torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim, start, end, step };
                applySymbolicShapesOnLT("aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", inputs, shapes);
            }
            node = torch::lazy::MakeNode<SliceCopyTensor>(lazy_self->GetIrValue(), dim, start_val, end_val, step_val, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *device));
    }

    
    // Lazy-tensor kernel for aten::slice_scatter: records the op as an IR node
    // (scattering `src` into a slice of `self` along `dim`) instead of running
    // it eagerly. SymInt start/end/step are lowered to IR values via
    // GetSymIntValue.
    at::Tensor LazyNativeFunctions::slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
        
        // Ops on the force-eager list bypass the lazy graph entirely and go
        // through the CPU eager fallback.
        if (force_eager_fallback(at::aten::slice_scatter)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(slice_scatter)>::call(
                self,
                src,
                dim,
                start,
                end,
                step
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        // All tensor/SymInt arguments must agree on a single backend device.
        auto common_device = torch::lazy::GetBackendDevice(self, src, start, end, step);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        LazyTensorPtr lazy_src = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(src, *common_device);
        // First try to reuse a structurally identical cached IR node; the
        // optional SymInts are converted to optional IR values inline.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<SliceScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, start ? std::make_optional(GetSymIntValue(*start)) : ::std::nullopt, end ? std::make_optional(GetSymIntValue(*end)) : ::std::nullopt, GetSymIntValue(step));
        if (!node) {
                    auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        // Run the op on meta tensors purely to infer the output dtype/sizes.
        auto out_meta = at::compositeexplicitautogradnonfunctional::slice_scatter_symint(self_meta, src_meta, dim, start, end, step);
        
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the inferred shapes with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, src, dim, start, end, step };
                const char* schema_str = "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            // NOTE: the argument conversion here must mirror the ReuseNode call
            // above so cache lookups and insertions stay consistent.
            node = torch::lazy::MakeNode<SliceScatter>(lazy_self->GetIrValue(), lazy_src->GetIrValue(), dim, start ? std::make_optional(GetSymIntValue(*start)) : ::std::nullopt, end ? std::make_optional(GetSymIntValue(*end)) : ::std::nullopt, GetSymIntValue(step), std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node back into an at::Tensor on the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::smooth_l1_loss: records the op in the lazy IR
    // graph rather than computing it eagerly.
    at::Tensor LazyNativeFunctions::smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::smooth_l1_loss)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(smooth_l1_loss)>::call(self, target, reduction, beta);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, target);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr target_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SmoothL1Loss>(self_lt->GetIrValue(), target_lt->GetIrValue(), reduction, beta);
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::meta::smooth_l1_loss(to_meta(self), to_meta(target), reduction, beta);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, target, reduction, beta };
                applySymbolicShapesOnLT("aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SmoothL1Loss>(self_lt->GetIrValue(), target_lt->GetIrValue(), reduction, beta, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::smooth_l1_loss_backward: traces the backward op
    // into the lazy IR graph.
    at::Tensor LazyNativeFunctions::smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::smooth_l1_loss_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(smooth_l1_loss_backward)>::call(grad_output, self, target, reduction, beta);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self, target);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr grad_output_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr target_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(target, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SmoothL1LossBackward>(grad_output_lt->GetIrValue(), self_lt->GetIrValue(), target_lt->GetIrValue(), reduction, beta);
        if (ir == nullptr) {
            // Shape inference via the hand-written lazy shape function.
            auto shapes = torch::lazy::compute_shape_smooth_l1_loss_backward(grad_output, self, target, reduction, beta);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { grad_output, self, target, reduction, beta };
                applySymbolicShapesOnLT("aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SmoothL1LossBackward>(grad_output_lt->GetIrValue(), self_lt->GetIrValue(), target_lt->GetIrValue(), reduction, beta, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::softplus: scalars beta/threshold are lowered to
    // IR scalar values and the op is recorded in the lazy graph.
    at::Tensor LazyNativeFunctions::softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::softplus)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(softplus)>::call(self, beta, threshold);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower the scalar arguments into IR values on the common device.
        auto beta_ir = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(beta, *device);
        auto threshold_ir = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(threshold, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Softplus>(self_lt->GetIrValue(), beta_ir, threshold_ir);
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::meta::softplus(to_meta(self), beta, threshold);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, beta, threshold };
                applySymbolicShapesOnLT("aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<Softplus>(self_lt->GetIrValue(), beta_ir, threshold_ir, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::softplus_backward: traces the backward op into
    // the lazy IR graph, lowering the scalar arguments to IR values.
    at::Tensor LazyNativeFunctions::softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::softplus_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(softplus_backward)>::call(grad_output, self, beta, threshold);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output, self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr grad_output_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Lower the scalar arguments into IR values on the common device.
        auto beta_ir = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(beta, *device);
        auto threshold_ir = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(threshold, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SoftplusBackward>(grad_output_lt->GetIrValue(), self_lt->GetIrValue(), beta_ir, threshold_ir);
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::meta::softplus_backward(to_meta(grad_output), to_meta(self), beta, threshold);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { grad_output, self, beta, threshold };
                applySymbolicShapesOnLT("aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SoftplusBackward>(grad_output_lt->GetIrValue(), self_lt->GetIrValue(), beta_ir, threshold_ir, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::sort: a single IR node with two outputs
    // (values, indices), unpacked into a tuple of tensors.
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::sort(const at::Tensor & self, int64_t dim, bool descending) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::sort)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sort)>::call(self, dim, descending);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Sort>(self_lt->GetIrValue(), dim, descending);
        if (ir == nullptr) {
            // Shape inference via the hand-written lazy shape function; sort
            // yields two outputs, hence two shapes.
            auto shapes = torch::lazy::compute_shape_sort(self, dim, descending);
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, dim, descending };
                applySymbolicShapesOnLT("aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<Sort>(self_lt->GetIrValue(), dim, descending, std::move(shapes));
            CacheNode(ir);
        }

        // Materialize one lazy tensor per node output and pack them as a tuple.
        std::vector<LazyTensorPtr> outputs;
        for (int out_idx = 0; out_idx < 2; ++out_idx) {
            outputs.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(ir, out_idx), *device));
        }
        return torch::lazy::TupleAtenFromLtcTensors<2>(outputs);
    }

    
    // Lazy kernel for aten::sqrt: records the elementwise op in the lazy IR.
    at::Tensor LazyNativeFunctions::sqrt(const at::Tensor & self) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::sqrt)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sqrt)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Sqrt>(self_lt->GetIrValue());
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::meta::sqrt(to_meta(self));
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::sqrt(Tensor self) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<Sqrt>(self_lt->GetIrValue(), std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::squeeze_copy (all-dims overload): records the op
    // in the lazy IR graph.
    at::Tensor LazyNativeFunctions::squeeze_copy(const at::Tensor & self) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::squeeze_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(squeeze_copy)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SqueezeCopy>(self_lt->GetIrValue());
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::compositeexplicitautogradnonfunctional::squeeze_copy(to_meta(self));
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::squeeze_copy(Tensor self) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SqueezeCopy>(self_lt->GetIrValue(), std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::squeeze_copy.dim (single-dim overload): records
    // the op in the lazy IR graph.
    at::Tensor LazyNativeFunctions::squeeze_copy(const at::Tensor & self, int64_t dim) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::squeeze_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(squeeze_copy, dim)>::call(self, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SqueezeCopyDim>(self_lt->GetIrValue(), dim);
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::compositeexplicitautogradnonfunctional::squeeze_copy(to_meta(self), dim);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, dim };
                applySymbolicShapesOnLT("aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SqueezeCopyDim>(self_lt->GetIrValue(), dim, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::squeeze_copy.dims (dim-list overload): records
    // the op in the lazy IR graph; the dim list is copied into an owned vector.
    at::Tensor LazyNativeFunctions::squeeze_copy(const at::Tensor & self, at::IntArrayRef dim) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::squeeze_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(squeeze_copy, dims)>::call(self, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        // The IR node owns its own copy of the dim list.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SqueezeCopyDims>(self_lt->GetIrValue(), std::vector<int64_t>(dim.begin(), dim.end()));
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::compositeexplicitautogradnonfunctional::squeeze_copy(to_meta(self), dim);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, dim };
                applySymbolicShapesOnLT("aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SqueezeCopyDims>(self_lt->GetIrValue(), std::vector<int64_t>(dim.begin(), dim.end()), std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::stack: records a variadic-input op in the lazy
    // IR graph.
    at::Tensor LazyNativeFunctions::stack(at::TensorList tensors, int64_t dim) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::stack)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(stack)>::call(tensors, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(tensors);
        TORCH_INTERNAL_ASSERT(device);

        // Convert the input list into lazy IR values.
        auto ir_inputs = torch::lazy::GetTensorList(tensors);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Stack>(ir_inputs, dim);
        if (ir == nullptr) {
            // Shape inference via the hand-written lazy shape function.
            auto shapes = torch::lazy::compute_shape_stack(tensors, dim);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { tensors, dim };
                applySymbolicShapesOnLT("aten::stack(Tensor[] tensors, int dim=0) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<Stack>(ir_inputs, dim, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::std (whole-tensor overload): records the
    // reduction in the lazy IR graph.
    at::Tensor LazyNativeFunctions::std(const at::Tensor & self, bool unbiased) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::std)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(std)>::call(self, unbiased);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Std>(self_lt->GetIrValue(), unbiased);
        if (ir == nullptr) {
            // Shape inference via the hand-written lazy shape function.
            auto shapes = torch::lazy::compute_shape_std(self, unbiased);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, unbiased };
                applySymbolicShapesOnLT("aten::std(Tensor self, bool unbiased=True) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<Std>(self_lt->GetIrValue(), unbiased, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::std.dim: records the per-dim reduction in the
    // lazy IR graph; the optional dim list is copied into an owned vector.
    at::Tensor LazyNativeFunctions::std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::std)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(std, dim)>::call(self, dim, unbiased, keepdim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<StdDim>(self_lt->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), unbiased, keepdim);
        if (ir == nullptr) {
            // Shape inference via the hand-written lazy shape function.
            auto shapes = torch::lazy::compute_shape_std(self, dim, unbiased, keepdim);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, dim, unbiased, keepdim };
                applySymbolicShapesOnLT("aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<StdDim>(self_lt->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), unbiased, keepdim, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy-tensor kernel for aten::std.correction: records the reduction in
    // the lazy IR graph. The optional `correction` scalar is lowered to an
    // optional IR scalar value.
    at::Tensor LazyNativeFunctions::std(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
        
        // Ops on the force-eager list bypass the lazy graph entirely and go
        // through the CPU eager fallback.
        if (force_eager_fallback(at::aten::std)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(std, correction)>::call(
                self,
                dim,
                correction,
                keepdim
            );
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);
        
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Lower the optional correction scalar to an optional IR value; an
        // absent scalar stays absent in the IR node.
        auto node_correction = correction ?
                std::make_optional(torch::lazy::LazyGraphExecutor::Get()->
                    GetIrValueForScalarFromCodegen(*correction, *common_device)):
                ::std::nullopt;
        // First try to reuse a structurally identical cached IR node.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<StdCorrection>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), node_correction, keepdim);
        if (!node) {
            
            // Shape inference via the hand-written lazy shape function.
            auto shapes = torch::lazy::compute_shape_std(self, dim, correction, keepdim);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            // Optionally refine the inferred shapes with symbolic-shape info.
            if(torch::lazy::symbolicShapeEnabled()){
                std::vector<torch::jit::IValue> inputs = { self, dim, correction, keepdim };
                const char* schema_str = "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
        
            node = torch::lazy::MakeNode<StdCorrection>(lazy_self->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), node_correction, keepdim, std::move(shapes));
            CacheNode(node);
        }
        
        // Wrap the IR node back into an at::Tensor on the common device.
        auto result = torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(node), *common_device));
        return result;
    }

    
    // Lazy kernel for aten::sub.Tensor: records self - alpha * other in the
    // lazy IR graph, lowering the alpha scalar to an IR value.
    at::Tensor LazyNativeFunctions::sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::sub)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(sub, Tensor)>::call(self, other, alpha);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self, other);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        LazyTensorPtr other_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(other, *device);
        // Lower the alpha scalar into an IR value on the common device.
        auto alpha_ir = torch::lazy::LazyGraphExecutor::Get()->GetIrValueForScalarFromCodegen(alpha, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SubTensor>(self_lt->GetIrValue(), other_lt->GetIrValue(), alpha_ir);
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::meta::sub(to_meta(self), to_meta(other), alpha);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, other, alpha };
                applySymbolicShapesOnLT("aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SubTensor>(self_lt->GetIrValue(), other_lt->GetIrValue(), alpha_ir, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::sum (whole-tensor overload): records the
    // reduction in the lazy IR graph.
    at::Tensor LazyNativeFunctions::sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::sum)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(sum)>::call(self, dtype);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<Sum>(self_lt->GetIrValue(), dtype);
        if (ir == nullptr) {
            // Shape inference via the hand-written lazy shape function.
            auto shapes = torch::lazy::compute_shape_sum(self, dtype);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, dtype };
                applySymbolicShapesOnLT("aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<Sum>(self_lt->GetIrValue(), dtype, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::sum.dim_IntList: records the per-dim reduction
    // in the lazy IR graph; the optional dim list is copied into an owned
    // vector for the IR node.
    at::Tensor LazyNativeFunctions::sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::sum)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(sum, dim_IntList)>::call(self, dim, keepdim, dtype);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<SumDimIntlist>(self_lt->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype);
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::meta::sum(to_meta(self), dim, keepdim, dtype);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self, dim, keepdim, dtype };
                applySymbolicShapesOnLT("aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<SumDimIntlist>(self_lt->GetIrValue(), torch::lazy::ToOptionalVector<int64_t>(dim), keepdim, dtype, std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    // Lazy kernel for aten::t_copy: records the transpose-copy op in the
    // lazy IR graph.
    at::Tensor LazyNativeFunctions::t_copy(const at::Tensor & self) {
        // Ops on the force-eager list are routed straight to the CPU fallback.
        if (force_eager_fallback(at::aten::t_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(t_copy)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr self_lt = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Reuse an identical cached IR node when available; otherwise build one.
        torch::lazy::NodePtr ir = torch::lazy::ReuseNode<TCopy>(self_lt->GetIrValue());
        if (ir == nullptr) {
            // Derive the output dtype/sizes by running the op on meta tensors.
            auto out_meta = at::compositeexplicitautogradnonfunctional::t_copy(to_meta(self));
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Optionally refine the shapes with symbolic-shape information.
                std::vector<torch::jit::IValue> inputs = { self };
                applySymbolicShapesOnLT("aten::t_copy(Tensor self) -> Tensor", inputs, shapes);
            }
            ir = torch::lazy::MakeNode<TCopy>(self_lt->GetIrValue(), std::move(shapes));
            CacheNode(ir);
        }

        // Wrap the IR node back into an at::Tensor on the common device.
        return torch::lazy::CreateAtenFromLtcTensor(
                torch::lazy::LazyTensor::Create(std::move(ir), *device));
    }

    
    at::Tensor LazyNativeFunctions::tanh(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::tanh)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(tanh)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Tanh>(lazy_self->GetIrValue());
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::tanh(self_meta);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::tanh(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Tanh>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::tanh_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(tanh_backward)>::call(grad_output, output);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(grad_output, output);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(output, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<TanhBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue());
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto grad_output_meta = to_meta(grad_output);
            auto output_meta = to_meta(output);
            auto out_meta = at::meta::tanh_backward(grad_output_meta, output_meta);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, output };
                const char* schema_str = "aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<TanhBackward>(lazy_grad_output->GetIrValue(), lazy_output->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::threshold)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(threshold)>::call(self, threshold, value);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Scalar arguments become IR values so they participate in node hashing/reuse.
        auto node_threshold = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(threshold, *common_device);
        auto node_value = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(value, *common_device);
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Threshold>(lazy_self->GetIrValue(), node_threshold, node_value);
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::threshold(self_meta, threshold, value);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, threshold, value };
                const char* schema_str = "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Threshold>(lazy_self->GetIrValue(), node_threshold, node_value, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::threshold_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(threshold_backward)>::call(grad_output, self, threshold);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(grad_output, self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Scalar argument becomes an IR value so it participates in node hashing/reuse.
        auto node_threshold = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen(threshold, *common_device);
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<ThresholdBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_threshold);
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto grad_output_meta = to_meta(grad_output);
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::threshold_backward(grad_output_meta, self_meta, threshold);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, self, threshold };
                const char* schema_str = "aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<ThresholdBackward>(lazy_grad_output->GetIrValue(), lazy_self->GetIrValue(), node_threshold, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    ::std::tuple<at::Tensor,at::Tensor> LazyNativeFunctions::topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::topk)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(topk)>::call(self, k, dim, largest, sorted);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Topk>(lazy_self->GetIrValue(), k, dim, largest, sorted);
        if (!node) {
            // Cache miss: run the meta kernel; this op yields (values, indices),
            // so record one shape per output.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::topk(self_meta, k, dim, largest, sorted);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(std::get<0>(out_meta).scalar_type(), std::get<0>(out_meta).sizes().vec());
            shapes.emplace_back(std::get<1>(out_meta).scalar_type(), std::get<1>(out_meta).sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 2);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, k, dim, largest, sorted };
                const char* schema_str = "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Topk>(lazy_self->GetIrValue(), k, dim, largest, sorted, std::move(shapes));
            CacheNode(node);
        }

        // Wrap each output of the multi-result node in its own lazy tensor.
        std::vector<LazyTensorPtr> lazy_tensors;
        lazy_tensors.reserve(2);
        for (int i = 0; i < 2; i++) {
            lazy_tensors.push_back(torch::lazy::LazyTensor::Create(torch::lazy::Value(node, i), *common_device));
        }
        return torch::lazy::TupleAtenFromLtcTensors<2>(lazy_tensors);
    }

    
    at::Tensor LazyNativeFunctions::trace(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::trace)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(trace)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Trace>(lazy_self->GetIrValue());
        if (!node) {
            // Cache miss: this op uses a hand-written shape function instead of a meta kernel.
            auto shapes = torch::lazy::compute_shape_trace(self);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::trace(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Trace>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::transpose_copy(const at::Tensor & self, int64_t dim0, int64_t dim1) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::transpose_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(transpose_copy, int)>::call(self, dim0, dim1);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<TransposeCopyInt>(lazy_self->GetIrValue(), dim0, dim1);
        if (!node) {
            // Cache miss: infer the output shape/dtype on the meta device.
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::transpose_copy(self_meta, dim0, dim1);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim0, dim1 };
                const char* schema_str = "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<TransposeCopyInt>(lazy_self->GetIrValue(), dim0, dim1, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::tril(const at::Tensor & self, int64_t diagonal) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::tril)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(tril)>::call(self, diagonal);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Tril>(lazy_self->GetIrValue(), diagonal);
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::tril(self_meta, diagonal);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, diagonal };
                const char* schema_str = "aten::tril(Tensor self, int diagonal=0) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Tril>(lazy_self->GetIrValue(), diagonal, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::triu(const at::Tensor & self, int64_t diagonal) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::triu)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(triu)>::call(self, diagonal);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Triu>(lazy_self->GetIrValue(), diagonal);
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::triu(self_meta, diagonal);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, diagonal };
                const char* schema_str = "aten::triu(Tensor self, int diagonal=0) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Triu>(lazy_self->GetIrValue(), diagonal, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::trunc(const at::Tensor & self) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::trunc)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(trunc)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Trunc>(lazy_self->GetIrValue());
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::trunc(self_meta);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::trunc(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Trunc>(lazy_self->GetIrValue(), std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::unfold_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(unfold_copy)>::call(self, dimension, size, step);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<UnfoldCopy>(lazy_self->GetIrValue(), dimension, size, step);
        if (!node) {
            // Cache miss: infer the output shape/dtype on the meta device.
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::unfold_copy(self_meta, dimension, size, step);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dimension, size, step };
                const char* schema_str = "aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<UnfoldCopy>(lazy_self->GetIrValue(), dimension, size, step, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::uniform(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::uniform)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(uniform)>::call(self, from, to, generator);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<Uniform>(lazy_self->GetIrValue(), from, to, generator);
        if (!node) {
            // Cache miss: this op uses a hand-written shape function instead of a meta kernel.
            auto shapes = torch::lazy::compute_shape_uniform(self, from, to, generator);
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, from, to, generator };
                const char* schema_str = "aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<Uniform>(lazy_self->GetIrValue(), from, to, generator, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::unsqueeze_copy(const at::Tensor & self, int64_t dim) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::unsqueeze_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(unsqueeze_copy)>::call(self, dim);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Prefer reusing a structurally identical IR node already in the cache.
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<UnsqueezeCopy>(lazy_self->GetIrValue(), dim);
        if (!node) {
            // Cache miss: infer the output shape/dtype on the meta device.
            auto self_meta = to_meta(self);
            auto out_meta = at::compositeexplicitautogradnonfunctional::unsqueeze_copy(self_meta, dim);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, dim };
                const char* schema_str = "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<UnsqueezeCopy>(lazy_self->GetIrValue(), dim, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::upsample_bilinear2d)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_bilinear2d)>::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Materialize the array ref once; the IR node stores it as an owned vector.
        std::vector<int64_t> output_size_vec(output_size.begin(), output_size.end());
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<UpsampleBilinear2d>(lazy_self->GetIrValue(), output_size_vec, align_corners, scales_h, scales_w);
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::upsample_bilinear2d(self_meta, output_size, align_corners, scales_h, scales_w);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, output_size, align_corners, scales_h, scales_w };
                const char* schema_str = "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<UpsampleBilinear2d>(lazy_self->GetIrValue(), output_size_vec, align_corners, scales_h, scales_w, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::upsample_bilinear2d_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_bilinear2d_backward)>::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(grad_output);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *common_device);
        // Materialize the array refs once; the IR node stores them as owned vectors.
        std::vector<int64_t> output_size_vec(output_size.begin(), output_size.end());
        std::vector<int64_t> input_size_vec(input_size.begin(), input_size.end());
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<UpsampleBilinear2dBackward>(lazy_grad_output->GetIrValue(), output_size_vec, input_size_vec, align_corners, scales_h, scales_w);
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto grad_output_meta = to_meta(grad_output);
            auto out_meta = at::meta::upsample_bilinear2d_backward(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { grad_output, output_size, input_size, align_corners, scales_h, scales_w };
                const char* schema_str = "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<UpsampleBilinear2dBackward>(lazy_grad_output->GetIrValue(), output_size_vec, input_size_vec, align_corners, scales_h, scales_w, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
        // Route to the eager CPU fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::upsample_nearest2d)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_nearest2d)>::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto common_device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(common_device);

        LazyTensorPtr lazy_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
        // Materialize the array ref once; the IR node stores it as an owned vector.
        std::vector<int64_t> output_size_vec(output_size.begin(), output_size.end());
        torch::lazy::NodePtr node = torch::lazy::ReuseNode<UpsampleNearest2d>(lazy_self->GetIrValue(), output_size_vec, scales_h, scales_w);
        if (!node) {
            // Cache miss: run the meta kernel to infer the output shape/dtype.
            auto self_meta = to_meta(self);
            auto out_meta = at::meta::upsample_nearest2d(self_meta, output_size, scales_h, scales_w);
            std::vector<torch::lazy::Shape> shapes;
            shapes.emplace_back(out_meta.scalar_type(), out_meta.sizes().vec());
            TORCH_INTERNAL_ASSERT(shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                std::vector<torch::jit::IValue> inputs = { self, output_size, scales_h, scales_w };
                const char* schema_str = "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }
            node = torch::lazy::MakeNode<UpsampleNearest2d>(lazy_self->GetIrValue(), output_size_vec, scales_h, scales_w, std::move(shapes));
            CacheNode(node);
        }

        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(node), *common_device));
    }

    
    at::Tensor LazyNativeFunctions::upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
        // Route to the eager fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::upsample_nearest2d_backward)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(upsample_nearest2d_backward)>::call(
                grad_output,
                c10::fromIntArrayRefSlow(output_size),
                c10::fromIntArrayRefSlow(input_size),
                scales_h,
                scales_w);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(grad_output);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr ltc_grad_output = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(grad_output, *device);
        // Prefer a structurally identical node already present in the trace cache.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<UpsampleNearest2dBackward>(
            ltc_grad_output->GetIrValue(),
            output_size.vec(),
            input_size.vec(),
            scales_h,
            scales_w);
        if (!ir_node) {
            // Cache miss: derive the output shape by running the op on meta tensors.
            auto grad_output_meta = to_meta(grad_output);
            auto meta_out = at::meta::upsample_nearest2d_backward(grad_output_meta, output_size, input_size, scales_h, scales_w);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Refine the inferred shape with symbolic-shape information when enabled.
                std::vector<torch::jit::IValue> inputs = { grad_output, output_size, input_size, scales_h, scales_w };
                const char* schema_str = "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<UpsampleNearest2dBackward>(
                ltc_grad_output->GetIrValue(),
                output_size.vec(),
                input_size.vec(),
                scales_h,
                scales_w,
                std::move(out_shapes));
            CacheNode(ir_node);
        }

        // Wrap the IR node in a lazy tensor and expose it as a regular at::Tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::view_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
        // Route to the eager fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::view_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(view_copy)>::call(self, size);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr ltc_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a structurally identical node already present in the trace cache.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<ViewCopy>(ltc_self->GetIrValue(), GetSymIntArrayRefValue(size));
        if (!ir_node) {
            // Cache miss: derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto meta_out = at::compositeexplicitautogradnonfunctional::view_copy_symint(self_meta, size);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Refine the inferred shape with symbolic-shape information when enabled.
                std::vector<torch::jit::IValue> inputs = { self, size };
                const char* schema_str = "aten::view_copy(Tensor self, SymInt[] size) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<ViewCopy>(ltc_self->GetIrValue(), GetSymIntArrayRefValue(size), std::move(out_shapes));
            CacheNode(ir_node);
        }

        // Wrap the IR node in a lazy tensor and expose it as a regular at::Tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::view_copy(const at::Tensor & self, at::ScalarType dtype) {
        // Route to the eager fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::view_copy)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP2(view_copy, dtype)>::call(self, dtype);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr ltc_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a structurally identical node already present in the trace cache.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<ViewCopyDtype>(ltc_self->GetIrValue(), dtype);
        if (!ir_node) {
            // Cache miss: derive the output shape by running the op on meta tensors.
            auto self_meta = to_meta(self);
            auto meta_out = at::compositeexplicitautogradnonfunctional::view_copy(self_meta, dtype);
            std::vector<torch::lazy::Shape> out_shapes{torch::lazy::Shape(meta_out.scalar_type(), meta_out.sizes().vec())};
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Refine the inferred shape with symbolic-shape information when enabled.
                std::vector<torch::jit::IValue> inputs = { self, dtype };
                const char* schema_str = "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<ViewCopyDtype>(ltc_self->GetIrValue(), dtype, std::move(out_shapes));
            CacheNode(ir_node);
        }

        // Wrap the IR node in a lazy tensor and expose it as a regular at::Tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

    
    at::Tensor LazyNativeFunctions::zero(const at::Tensor & self) {
        // Route to the eager fallback when lazy tracing is disabled for this op.
        if (force_eager_fallback(at::aten::zero)) {
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(zero)>::call(self);
        }

        TORCH_LAZY_FN_COUNTER("lazy::");
        auto device = torch::lazy::GetBackendDevice(self);
        TORCH_INTERNAL_ASSERT(device);

        LazyTensorPtr ltc_self = torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *device);
        // Prefer a structurally identical node already present in the trace cache.
        torch::lazy::NodePtr ir_node = torch::lazy::ReuseNode<Zero>(ltc_self->GetIrValue());
        if (!ir_node) {
            // Cache miss: this op uses a dedicated shape-inference helper rather
            // than a meta-device kernel.
            auto out_shapes = torch::lazy::compute_shape_zero(self);
            TORCH_INTERNAL_ASSERT(out_shapes.size() == 1);
            if (torch::lazy::symbolicShapeEnabled()) {
                // Refine the inferred shape with symbolic-shape information when enabled.
                std::vector<torch::jit::IValue> inputs = { self };
                const char* schema_str = "aten::zero(Tensor self) -> Tensor";
                applySymbolicShapesOnLT(schema_str, inputs, out_shapes);
            }
            ir_node = torch::lazy::MakeNode<Zero>(ltc_self->GetIrValue(), std::move(out_shapes));
            CacheNode(ir_node);
        }

        // Wrap the IR node in a lazy tensor and expose it as a regular at::Tensor.
        return torch::lazy::CreateAtenFromLtcTensor(
            torch::lazy::LazyTensor::Create(std::move(ir_node), *device));
    }

} // namespace lazy
} // namespace torch
