#pragma once

// This file contains autogenerated LazyTensor IR nodes
#include <ATen/core/Formatting.h>
#include <c10/core/ScalarType.h>
#include <c10/util/OptionalArrayRef.h>
#include <torch/csrc/lazy/core/hash.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/shape.h>
#include <optional>
#include <vector>
#include "torch/csrc/lazy/ts_backend/ts_node.h"

namespace torch {
namespace lazy {

using at::operator<<;

// kNullValue is used to contribute a static hash value any time
// a node has an Optional<Value> input that is nullopt. It is important
// to differentiate between HASH(std::nullopt, something) and
// HASH(something, std::nullopt), and using kNullValue in the hash function
// in the order of arguments serves this purpose.
static const torch::lazy::Value kNullValue = torch::lazy::Value();

class AdaptiveAvgPool2d : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::_adaptive_avg_pool2d);
  }

  AdaptiveAvgPool2d(const torch::lazy::Value& self, const ::std::vector<int64_t>& output_size, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AdaptiveAvgPool2d::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(output_size)),
        output_size(output_size) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", output_size=" << output_size;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& output_size) const {
    size_t i = 0;
    return (operand(i++) == self && this->output_size == output_size);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("output_size", output_size);
    torch::lazy::TSOpVector _adaptive_avg_pool2d_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(_adaptive_avg_pool2d_out.size(), 1);
    return _adaptive_avg_pool2d_out;
  }

  ::std::vector<int64_t> output_size;
};

class AdaptiveAvgPool2dBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::_adaptive_avg_pool2d_backward);
  }

  AdaptiveAvgPool2dBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AdaptiveAvgPool2dBackward::ClassOpKind(), OpList{grad_output, self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector _adaptive_avg_pool2d_backward_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(_adaptive_avg_pool2d_backward_out.size(), 1);
    return _adaptive_avg_pool2d_backward_out;
  }
};

class LogSoftmax : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::_log_softmax);
  }

  LogSoftmax(const torch::lazy::Value& self, const int64_t& dim, const bool& half_to_float, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LogSoftmax::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(dim, half_to_float)),
        dim(dim),
        half_to_float(half_to_float) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    ss << ", half_to_float=" << half_to_float;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const bool& half_to_float) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim &&
            this->half_to_float == half_to_float);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("half_to_float", half_to_float);
    torch::lazy::TSOpVector _log_softmax_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(_log_softmax_out.size(), 1);
    return _log_softmax_out;
  }

  int64_t dim;
  bool half_to_float;
};
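// NOTE [Generated node anatomy]: every node in this file follows the same
// shape. ClassOpKind() names the ATen op the node represents; the constructor
// folds all non-operand attributes into the node hash via torch::lazy::MHash;
// CanBeReused() is the exact-match predicate consulted by the IR node cache
// when a hash match suggests an existing node may be identical; ToString()
// appends the attributes to the base node description (vectors print through
// the `using at::operator<<` above); and Lower() rebuilds the argument list
// and emits a single TorchScript builtin via torch::lazy::LowerTSBuiltin.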
class LogSoftmaxBackwardData : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::_log_softmax_backward_data);
  }

  LogSoftmaxBackwardData(const torch::lazy::Value& grad_output, const torch::lazy::Value& output, const int64_t& dim, const at::ScalarType& input_dtype, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LogSoftmaxBackwardData::ClassOpKind(), OpList{grad_output, output},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(dim, input_dtype)),
        dim(dim),
        input_dtype(input_dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    ss << ", input_dtype=" << input_dtype;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& output, const int64_t& dim, const at::ScalarType& input_dtype) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == output &&
            this->dim == dim && this->input_dtype == input_dtype);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("input_dtype", input_dtype);
    torch::lazy::TSOpVector _log_softmax_backward_data_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(_log_softmax_backward_data_out.size(), 1);
    return _log_softmax_backward_data_out;
  }

  int64_t dim;
  at::ScalarType input_dtype;
};

class ReshapeAliasCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::_reshape_alias_copy);
  }

  ReshapeAliasCopy(const torch::lazy::Value& self, const ::std::vector<int64_t>& size, const ::std::vector<int64_t>& stride, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ReshapeAliasCopy::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(size, stride)),
        size(size),
        stride(stride) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", size=" << size;
    ss << ", stride=" << stride;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& size, const ::std::vector<int64_t>& stride) const {
    size_t i = 0;
    return (operand(i++) == self && this->size == size &&
            this->stride == stride);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("size", size);
    arguments.emplace_back("stride", stride);
    torch::lazy::TSOpVector _reshape_alias_copy_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(_reshape_alias_copy_out.size(), 1);
    return _reshape_alias_copy_out;
  }

  ::std::vector<int64_t> size;
  ::std::vector<int64_t> stride;
};
class Softmax : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::_softmax);
  }

  Softmax(const torch::lazy::Value& self, const int64_t& dim, const bool& half_to_float, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Softmax::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(dim, half_to_float)),
        dim(dim),
        half_to_float(half_to_float) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    ss << ", half_to_float=" << half_to_float;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const bool& half_to_float) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim &&
            this->half_to_float == half_to_float);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("half_to_float", half_to_float);
    torch::lazy::TSOpVector _softmax_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(_softmax_out.size(), 1);
    return _softmax_out;
  }

  int64_t dim;
  bool half_to_float;
};

class SoftmaxBackwardData : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::_softmax_backward_data);
  }

  SoftmaxBackwardData(const torch::lazy::Value& grad_output, const torch::lazy::Value& output, const int64_t& dim, const at::ScalarType& input_dtype, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SoftmaxBackwardData::ClassOpKind(), OpList{grad_output, output},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(dim, input_dtype)),
        dim(dim),
        input_dtype(input_dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    ss << ", input_dtype=" << input_dtype;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& output, const int64_t& dim, const at::ScalarType& input_dtype) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == output &&
            this->dim == dim && this->input_dtype == input_dtype);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("input_dtype", input_dtype);
    torch::lazy::TSOpVector _softmax_backward_data_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(_softmax_backward_data_out.size(), 1);
    return _softmax_backward_data_out;
  }

  int64_t dim;
  at::ScalarType input_dtype;
};
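// Sketch of the intended call-site pattern for these nodes, using the generic
// helpers declared in torch/csrc/lazy/core/ir_builder.h (the shape-inference
// step is elided and the variable names here are illustrative only):
//
//   torch::lazy::NodePtr node =
//       torch::lazy::ReuseNode<Softmax>(input_value, dim, half_to_float);
//   if (!node) {
//     std::vector<torch::lazy::Shape> shapes = /* inferred output shapes */;
//     node = torch::lazy::MakeNode<Softmax>(
//         input_value, dim, half_to_float, std::move(shapes));
//     torch::lazy::CacheNode(node);
//   }
//
// ReuseNode() only returns a cached node whose CanBeReused() accepts the new
// arguments, which is why every class here implements that method.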
class Abs : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::abs);
  }

  Abs(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Abs::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector abs_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(abs_out.size(), 1);
    return abs_out;
  }
};

class AddTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::add);
  }

  AddTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, const torch::lazy::Value& alpha, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AddTensor::ClassOpKind(), OpList{self, other, alpha},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other, const torch::lazy::Value& alpha) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other &&
            operand(i++) == alpha);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("alpha", loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector add_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(add_out.size(), 1);
    return add_out;
  }
};

class Addcdiv : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::addcdiv);
  }

  Addcdiv(const torch::lazy::Value& self, const torch::lazy::Value& tensor1, const torch::lazy::Value& tensor2, const torch::lazy::Value& value, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Addcdiv::ClassOpKind(), OpList{self, tensor1, tensor2, value},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& tensor1, const torch::lazy::Value& tensor2, const torch::lazy::Value& value) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == tensor1 &&
            operand(i++) == tensor2 && operand(i++) == value);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("value", loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector addcdiv_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(addcdiv_out.size(), 1);
    return addcdiv_out;
  }
};
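// Note the split between `arguments` and `kwarguments` in the Lower() methods
// above: tensor operands are passed positionally, while Scalar operands such
// as `alpha` and `value` are keyword-only in the corresponding ATen schemas
// (e.g. add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1)), so they
// are bound by name for TorchScript schema matching.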
class Addcmul : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::addcmul);
  }

  Addcmul(const torch::lazy::Value& self, const torch::lazy::Value& tensor1, const torch::lazy::Value& tensor2, const torch::lazy::Value& value, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Addcmul::ClassOpKind(), OpList{self, tensor1, tensor2, value},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& tensor1, const torch::lazy::Value& tensor2, const torch::lazy::Value& value) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == tensor1 &&
            operand(i++) == tensor2 && operand(i++) == value);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("value", loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector addcmul_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(addcmul_out.size(), 1);
    return addcmul_out;
  }
};

class Addmm : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::addmm);
  }

  Addmm(const torch::lazy::Value& self, const torch::lazy::Value& mat1, const torch::lazy::Value& mat2, const torch::lazy::Value& beta, const torch::lazy::Value& alpha, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Addmm::ClassOpKind(), OpList{self, mat1, mat2, beta, alpha},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& mat1, const torch::lazy::Value& mat2, const torch::lazy::Value& beta, const torch::lazy::Value& alpha) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == mat1 &&
            operand(i++) == mat2 && operand(i++) == beta &&
            operand(i++) == alpha);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(2);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("beta", loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("alpha", loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector addmm_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(addmm_out.size(), 1);
    return addmm_out;
  }
};

class AliasCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::alias_copy);
  }

  AliasCopy(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AliasCopy::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector alias_copy_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(alias_copy_out.size(), 1);
    return alias_copy_out;
  }
};
class All : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::all);
  }

  All(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(All::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector all_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(all_out.size(), 1);
    return all_out;
  }
};

class Any : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::any);
  }

  Any(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Any::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector any_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(any_out.size(), 1);
    return any_out;
  }
};

class ArangeStartOut : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::arange);
  }

  ArangeStartOut(const torch::lazy::Value& start, const torch::lazy::Value& end, const torch::lazy::Value& step, const torch::lazy::Value& out, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ArangeStartOut::ClassOpKind(), OpList{start, end, step, out},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& start, const torch::lazy::Value& end, const torch::lazy::Value& step, const torch::lazy::Value& out) const {
    size_t i = 0;
    return (operand(i++) == start && operand(i++) == end &&
            operand(i++) == step && operand(i++) == out);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("out", loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector arange_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(arange_out.size(), 1);
    return arange_out;
  }
};
class AsStridedCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::as_strided_copy);
  }

  AsStridedCopy(const torch::lazy::Value& self, const ::std::vector<int64_t>& size, const ::std::vector<int64_t>& stride, const ::std::optional<torch::lazy::Value>& storage_offset, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AsStridedCopy::ClassOpKind(), OpList{self, storage_offset.value_or(kNullValue)},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(size, stride)),
        size(size),
        stride(stride) {
    has_storage_offset = !!storage_offset;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", size=" << size;
    ss << ", stride=" << stride;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& size, const ::std::vector<int64_t>& stride, const ::std::optional<torch::lazy::Value>& storage_offset) const {
    size_t i = 0;
    return (operand(i++) == self &&
            nullable_operand(i++) == storage_offset.value_or(kNullValue) &&
            this->size == size && this->stride == stride);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("size", size);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back(has_storage_offset ? loctx->GetOutputOp(operand(i++)) : nullptr);
    torch::lazy::TSOpVector as_strided_copy_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(as_strided_copy_out.size(), 1);
    return as_strided_copy_out;
  }

  ::std::vector<int64_t> size;
  ::std::vector<int64_t> stride;
  bool has_storage_offset : 1;
};
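// Pattern for optional tensor operands, used by AsStridedCopy above and by
// AsStridedScatter, BinaryCrossEntropy, Clamp, and Convolution below: the
// optional Value is flattened into the OpList as value_or(kNullValue) so the
// operand layout (and hash) stays fixed, a one-bit flag such as
// has_storage_offset records whether it was present, CanBeReused() compares
// it through nullable_operand(), and Lower() emits either the real operand
// or nullptr.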
class AsStridedScatter : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::as_strided_scatter);
  }

  AsStridedScatter(const torch::lazy::Value& self, const torch::lazy::Value& src, const ::std::vector<int64_t>& size, const ::std::vector<int64_t>& stride, const ::std::optional<torch::lazy::Value>& storage_offset, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AsStridedScatter::ClassOpKind(), OpList{self, src, storage_offset.value_or(kNullValue)},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(size, stride)),
        size(size),
        stride(stride) {
    has_storage_offset = !!storage_offset;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", size=" << size;
    ss << ", stride=" << stride;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& src, const ::std::vector<int64_t>& size, const ::std::vector<int64_t>& stride, const ::std::optional<torch::lazy::Value>& storage_offset) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == src &&
            nullable_operand(i++) == storage_offset.value_or(kNullValue) &&
            this->size == size && this->stride == stride);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("size", size);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back(has_storage_offset ? loctx->GetOutputOp(operand(i++)) : nullptr);
    torch::lazy::TSOpVector as_strided_scatter_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(as_strided_scatter_out.size(), 1);
    return as_strided_scatter_out;
  }

  ::std::vector<int64_t> size;
  ::std::vector<int64_t> stride;
  bool has_storage_offset : 1;
};
class AvgPool2d : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::avg_pool2d);
  }

  AvgPool2d(const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const bool& ceil_mode, const bool& count_include_pad, const ::std::optional<int64_t>& divisor_override, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AvgPool2d::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)),
        kernel_size(kernel_size),
        stride(stride),
        padding(padding),
        ceil_mode(ceil_mode),
        count_include_pad(count_include_pad),
        divisor_override(divisor_override) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", kernel_size=" << kernel_size;
    ss << ", stride=" << stride;
    ss << ", padding=" << padding;
    ss << ", ceil_mode=" << ceil_mode;
    ss << ", count_include_pad=" << count_include_pad;
    if (divisor_override.has_value()) {
      ss << ", divisor_override=" << divisor_override.value();
    } else {
      ss << ", divisor_override=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const bool& ceil_mode, const bool& count_include_pad, const ::std::optional<int64_t>& divisor_override) const {
    size_t i = 0;
    return (operand(i++) == self && this->kernel_size == kernel_size &&
            this->stride == stride && this->padding == padding &&
            this->ceil_mode == ceil_mode &&
            this->count_include_pad == count_include_pad &&
            ((!this->divisor_override && !divisor_override) ||
             (this->divisor_override && divisor_override &&
              *(this->divisor_override) == *divisor_override)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(7);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("kernel_size", kernel_size);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back("padding", padding);
    arguments.emplace_back("ceil_mode", ceil_mode);
    arguments.emplace_back("count_include_pad", count_include_pad);
    arguments.emplace_back("divisor_override", divisor_override);
    torch::lazy::TSOpVector avg_pool2d_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(avg_pool2d_out.size(), 1);
    return avg_pool2d_out;
  }

  ::std::vector<int64_t> kernel_size;
  ::std::vector<int64_t> stride;
  ::std::vector<int64_t> padding;
  bool ceil_mode;
  bool count_include_pad;
  ::std::optional<int64_t> divisor_override;
};
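// The spelled-out optional comparison in AvgPool2d::CanBeReused,
//   (!a && !b) || (a && b && *a == *b),
// is nullopt-safe equality on ::std::optional<int64_t>, equivalent to
// comparing the two optionals directly with operator==.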
class AvgPool2dBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::avg_pool2d_backward);
  }

  AvgPool2dBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const bool& ceil_mode, const bool& count_include_pad, const ::std::optional<int64_t>& divisor_override, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(AvgPool2dBackward::ClassOpKind(), OpList{grad_output, self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)),
        kernel_size(kernel_size),
        stride(stride),
        padding(padding),
        ceil_mode(ceil_mode),
        count_include_pad(count_include_pad),
        divisor_override(divisor_override) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", kernel_size=" << kernel_size;
    ss << ", stride=" << stride;
    ss << ", padding=" << padding;
    ss << ", ceil_mode=" << ceil_mode;
    ss << ", count_include_pad=" << count_include_pad;
    if (divisor_override.has_value()) {
      ss << ", divisor_override=" << divisor_override.value();
    } else {
      ss << ", divisor_override=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const bool& ceil_mode, const bool& count_include_pad, const ::std::optional<int64_t>& divisor_override) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self &&
            this->kernel_size == kernel_size && this->stride == stride &&
            this->padding == padding && this->ceil_mode == ceil_mode &&
            this->count_include_pad == count_include_pad &&
            ((!this->divisor_override && !divisor_override) ||
             (this->divisor_override && divisor_override &&
              *(this->divisor_override) == *divisor_override)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(8);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("kernel_size", kernel_size);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back("padding", padding);
    arguments.emplace_back("ceil_mode", ceil_mode);
    arguments.emplace_back("count_include_pad", count_include_pad);
    arguments.emplace_back("divisor_override", divisor_override);
    torch::lazy::TSOpVector avg_pool2d_backward_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(avg_pool2d_backward_out.size(), 1);
    return avg_pool2d_backward_out;
  }

  ::std::vector<int64_t> kernel_size;
  ::std::vector<int64_t> stride;
  ::std::vector<int64_t> padding;
  bool ceil_mode;
  bool count_include_pad;
  ::std::optional<int64_t> divisor_override;
};

class Baddbmm : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::baddbmm);
  }

  Baddbmm(const torch::lazy::Value& self, const torch::lazy::Value& batch1, const torch::lazy::Value& batch2, const torch::lazy::Value& beta, const torch::lazy::Value& alpha, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Baddbmm::ClassOpKind(), OpList{self, batch1, batch2, beta, alpha},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& batch1, const torch::lazy::Value& batch2, const torch::lazy::Value& beta, const torch::lazy::Value& alpha) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == batch1 &&
            operand(i++) == batch2 && operand(i++) == beta &&
            operand(i++) == alpha);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(2);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("beta", loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("alpha", loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector baddbmm_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(baddbmm_out.size(), 1);
    return baddbmm_out;
  }
};
class Bernoulli : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::bernoulli);
  }

  Bernoulli(const torch::lazy::Value& self, const ::std::optional<at::Generator>& generator, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Bernoulli::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(generator)),
        generator(generator) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (generator.has_value()) {
      ss << ", generator=" << "torch.Generator()";
    } else {
      ss << ", generator=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<at::Generator>& generator) const {
    size_t i = 0;
    return (operand(i++) == self &&
            ((!this->generator && !generator) ||
             (this->generator && generator &&
              *(this->generator) == *generator)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("generator", generator);
    torch::lazy::TSOpVector bernoulli_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(bernoulli_out.size(), 1);
    return bernoulli_out;
  }

  ::std::optional<at::Generator> generator;
};

class BernoulliP : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::bernoulli);
  }

  BernoulliP(const torch::lazy::Value& self, const double& p, const ::std::optional<at::Generator>& generator, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(BernoulliP::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(p, generator)),
        p(p),
        generator(generator) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", p=" << p;
    if (generator.has_value()) {
      ss << ", generator=" << "torch.Generator()";
    } else {
      ss << ", generator=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const double& p, const ::std::optional<at::Generator>& generator) const {
    size_t i = 0;
    return (operand(i++) == self && this->p == p &&
            ((!this->generator && !generator) ||
             (this->generator && generator &&
              *(this->generator) == *generator)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("p", p);
    kwarguments.emplace_back("generator", generator);
    torch::lazy::TSOpVector bernoulli_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(bernoulli_out.size(), 1);
    return bernoulli_out;
  }

  double p;
  ::std::optional<at::Generator> generator;
};
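// Bernoulli and BernoulliP above share OpKind(at::aten::bernoulli): they
// model two overloads of the same ATen op, so their nodes are distinguished
// by operand and attribute layout (and therefore by hash and CanBeReused),
// not by OpKind alone.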
class BinaryCrossEntropy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::binary_cross_entropy);
  }

  BinaryCrossEntropy(const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional<torch::lazy::Value>& weight, const int64_t& reduction, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(BinaryCrossEntropy::ClassOpKind(), OpList{self, target, weight.value_or(kNullValue)},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(reduction)),
        reduction(reduction) {
    has_weight = !!weight;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", reduction=" << reduction;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional<torch::lazy::Value>& weight, const int64_t& reduction) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == target &&
            nullable_operand(i++) == weight.value_or(kNullValue) &&
            this->reduction == reduction);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back("reduction", reduction);
    torch::lazy::TSOpVector binary_cross_entropy_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(binary_cross_entropy_out.size(), 1);
    return binary_cross_entropy_out;
  }

  int64_t reduction;
  bool has_weight : 1;
};
class BinaryCrossEntropyBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::binary_cross_entropy_backward);
  }

  BinaryCrossEntropyBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional<torch::lazy::Value>& weight, const int64_t& reduction, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(BinaryCrossEntropyBackward::ClassOpKind(), OpList{grad_output, self, target, weight.value_or(kNullValue)},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(reduction)),
        reduction(reduction) {
    has_weight = !!weight;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", reduction=" << reduction;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional<torch::lazy::Value>& weight, const int64_t& reduction) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self &&
            operand(i++) == target &&
            nullable_operand(i++) == weight.value_or(kNullValue) &&
            this->reduction == reduction);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back("reduction", reduction);
    torch::lazy::TSOpVector binary_cross_entropy_backward_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(binary_cross_entropy_backward_out.size(), 1);
    return binary_cross_entropy_backward_out;
  }

  int64_t reduction;
  bool has_weight : 1;
};

class BitwiseAndTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::bitwise_and);
  }

  BitwiseAndTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(BitwiseAndTensor::ClassOpKind(), OpList{self, other},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector bitwise_and_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(bitwise_and_out.size(), 1);
    return bitwise_and_out;
  }
};

class BitwiseOrTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::bitwise_or);
  }

  BitwiseOrTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(BitwiseOrTensor::ClassOpKind(), OpList{self, other},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector bitwise_or_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(bitwise_or_out.size(), 1);
    return bitwise_or_out;
  }
};
class Bmm : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::bmm);
  }

  Bmm(const torch::lazy::Value& self, const torch::lazy::Value& mat2, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Bmm::ClassOpKind(), OpList{self, mat2},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& mat2) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == mat2);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector bmm_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(bmm_out.size(), 1);
    return bmm_out;
  }
};

class Cat : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::cat);
  }

  Cat(const torch::lazy::Value& tensors, const int64_t& dim, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Cat::ClassOpKind(), OpList{tensors},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(dim)),
        dim(dim) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& tensors, const int64_t& dim) const {
    size_t i = 0;
    return (operand(i++) == tensors && this->dim == dim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    torch::lazy::TSOpVector cat_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(cat_out.size(), 1);
    return cat_out;
  }

  int64_t dim;
};

class Clamp : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::clamp);
  }

  Clamp(const torch::lazy::Value& self, const ::std::optional<torch::lazy::Value>& min, const ::std::optional<torch::lazy::Value>& max, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Clamp::ClassOpKind(), OpList{self, min.value_or(kNullValue), max.value_or(kNullValue)},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {
    has_min = !!min;
    has_max = !!max;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<torch::lazy::Value>& min, const ::std::optional<torch::lazy::Value>& max) const {
    size_t i = 0;
    return (operand(i++) == self &&
            nullable_operand(i++) == min.value_or(kNullValue) &&
            nullable_operand(i++) == max.value_or(kNullValue));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(has_min ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_max ? loctx->GetOutputOp(operand(i++)) : nullptr);
    torch::lazy::TSOpVector clamp_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(clamp_out.size(), 1);
    return clamp_out;
  }

  bool has_min : 1;
  bool has_max : 1;
};
class ClampMin : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::clamp_min);
  }

  ClampMin(const torch::lazy::Value& self, const torch::lazy::Value& min, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ClampMin::ClassOpKind(), OpList{self, min},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& min) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == min);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector clamp_min_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(clamp_min_out.size(), 1);
    return clamp_min_out;
  }
};

class ConstantPadNd : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::constant_pad_nd);
  }

  ConstantPadNd(const torch::lazy::Value& self, const ::std::vector<int64_t>& pad, const torch::lazy::Value& value, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ConstantPadNd::ClassOpKind(), OpList{self, value},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(pad)),
        pad(pad) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", pad=" << pad;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& pad, const torch::lazy::Value& value) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == value &&
            this->pad == pad);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("pad", pad);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector constant_pad_nd_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(constant_pad_nd_out.size(), 1);
    return constant_pad_nd_out;
  }

  ::std::vector<int64_t> pad;
};
class Convolution : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::convolution);
  }

  Convolution(const torch::lazy::Value& input, const torch::lazy::Value& weight, const ::std::optional<torch::lazy::Value>& bias, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& transposed, const ::std::vector<int64_t>& output_padding, const int64_t& groups, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Convolution::ClassOpKind(), OpList{input, weight, bias.value_or(kNullValue)},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(stride, padding, dilation, transposed, output_padding, groups)),
        stride(stride),
        padding(padding),
        dilation(dilation),
        transposed(transposed),
        output_padding(output_padding),
        groups(groups) {
    has_bias = !!bias;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", stride=" << stride;
    ss << ", padding=" << padding;
    ss << ", dilation=" << dilation;
    ss << ", transposed=" << transposed;
    ss << ", output_padding=" << output_padding;
    ss << ", groups=" << groups;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& input, const torch::lazy::Value& weight, const ::std::optional<torch::lazy::Value>& bias, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& transposed, const ::std::vector<int64_t>& output_padding, const int64_t& groups) const {
    size_t i = 0;
    return (operand(i++) == input && operand(i++) == weight &&
            nullable_operand(i++) == bias.value_or(kNullValue) &&
            this->stride == stride && this->padding == padding &&
            this->dilation == dilation && this->transposed == transposed &&
            this->output_padding == output_padding && this->groups == groups);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(9);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(has_bias ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back("padding", padding);
    arguments.emplace_back("dilation", dilation);
    arguments.emplace_back("transposed", transposed);
    arguments.emplace_back("output_padding", output_padding);
    arguments.emplace_back("groups", groups);
    torch::lazy::TSOpVector convolution_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(convolution_out.size(), 1);
    return convolution_out;
  }

  ::std::vector<int64_t> stride;
  ::std::vector<int64_t> padding;
  ::std::vector<int64_t> dilation;
  bool transposed;
  ::std::vector<int64_t> output_padding;
  int64_t groups;
  bool has_bias : 1;
};
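// ConvolutionBackward below is a multi-output node: num_outputs is 3 and its
// Lower() checks for exactly three results, corresponding to the
// (grad_input, grad_weight, grad_bias) outputs of
// at::aten::convolution_backward selected by output_mask.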
class ConvolutionBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::convolution_backward);
  }

  ConvolutionBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& input, const torch::lazy::Value& weight, const ::std::optional<::std::vector<int64_t>>& bias_sizes, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& transposed, const ::std::vector<int64_t>& output_padding, const int64_t& groups, const ::std::vector<bool>& output_mask, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ConvolutionBackward::ClassOpKind(), OpList{grad_output, input, weight},
               std::move(shapes),
               /* num_outputs */ 3,
               torch::lazy::MHash(bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask)),
        bias_sizes(bias_sizes),
        stride(stride),
        padding(padding),
        dilation(dilation),
        transposed(transposed),
        output_padding(output_padding),
        groups(groups),
        output_mask(output_mask) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (bias_sizes.has_value()) {
      ss << ", bias_sizes=" << bias_sizes.value();
    } else {
      ss << ", bias_sizes=null";
    }
    ss << ", stride=" << stride;
    ss << ", padding=" << padding;
    ss << ", dilation=" << dilation;
    ss << ", transposed=" << transposed;
    ss << ", output_padding=" << output_padding;
    ss << ", groups=" << groups;
    ss << ", output_mask=" << output_mask;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& input, const torch::lazy::Value& weight, const ::std::optional<::std::vector<int64_t>>& bias_sizes, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& transposed, const ::std::vector<int64_t>& output_padding, const int64_t& groups, const ::std::vector<bool>& output_mask) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == input &&
            operand(i++) == weight &&
            ((!this->bias_sizes && !bias_sizes) ||
             (this->bias_sizes && bias_sizes &&
              *(this->bias_sizes) == *bias_sizes)) &&
            this->stride == stride && this->padding == padding &&
            this->dilation == dilation && this->transposed == transposed &&
            this->output_padding == output_padding &&
            this->groups == groups && this->output_mask == output_mask);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(11);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("bias_sizes", bias_sizes);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back("padding", padding);
    arguments.emplace_back("dilation", dilation);
    arguments.emplace_back("transposed", transposed);
    arguments.emplace_back("output_padding", output_padding);
    arguments.emplace_back("groups", groups);
    arguments.emplace_back("output_mask", output_mask);
    torch::lazy::TSOpVector convolution_backward_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(convolution_backward_out.size(), 3);
    return convolution_backward_out;
  }

  ::std::optional<::std::vector<int64_t>> bias_sizes;
  ::std::vector<int64_t> stride;
  ::std::vector<int64_t> padding;
  ::std::vector<int64_t> dilation;
  bool transposed;
  ::std::vector<int64_t> output_padding;
  int64_t groups;
  ::std::vector<bool> output_mask;
};

class Cos : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::cos);
  }

  Cos(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Cos::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector cos_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(cos_out.size(), 1);
    return cos_out;
  }
};
class Cumsum : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::cumsum);
  }

  Cumsum(const torch::lazy::Value& self, const int64_t& dim, const ::std::optional<at::ScalarType>& dtype, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Cumsum::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(dim, dtype)),
        dim(dim),
        dtype(dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    if (dtype.has_value()) {
      ss << ", dtype=" << dtype.value();
    } else {
      ss << ", dtype=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const ::std::optional<at::ScalarType>& dtype) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim &&
            ((!this->dtype && !dtype) ||
             (this->dtype && dtype && *(this->dtype) == *dtype)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    kwarguments.emplace_back("dtype", dtype);
    torch::lazy::TSOpVector cumsum_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(cumsum_out.size(), 1);
    return cumsum_out;
  }

  int64_t dim;
  ::std::optional<at::ScalarType> dtype;
};

class DetachCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::detach_copy);
  }

  DetachCopy(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(DetachCopy::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector detach_copy_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(detach_copy_out.size(), 1);
    return detach_copy_out;
  }
};

class DiagonalCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::diagonal_copy);
  }

  DiagonalCopy(const torch::lazy::Value& self, const int64_t& offset, const int64_t& dim1, const int64_t& dim2, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(DiagonalCopy::ClassOpKind(), OpList{self},
               std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(offset, dim1, dim2)),
        offset(offset),
        dim1(dim1),
        dim2(dim2) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", offset=" << offset;
    ss << ", dim1=" << dim1;
    ss << ", dim2=" << dim2;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& offset, const int64_t& dim1, const int64_t& dim2) const {
    size_t i = 0;
    return (operand(i++) == self && this->offset == offset &&
            this->dim1 == dim1 && this->dim2 == dim2);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("offset", offset);
    arguments.emplace_back("dim1", dim1);
    arguments.emplace_back("dim2", dim2);
    torch::lazy::TSOpVector diagonal_copy_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(diagonal_copy_out.size(), 1);
    return diagonal_copy_out;
  }

  int64_t offset;
  int64_t dim1;
  int64_t dim2;
};
class DiagonalScatter : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::diagonal_scatter); }
  DiagonalScatter(const torch::lazy::Value& self, const torch::lazy::Value& src, const int64_t& offset, const int64_t& dim1, const int64_t& dim2, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(DiagonalScatter::ClassOpKind(), OpList{self, src}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(offset, dim1, dim2)),
        offset(offset), dim1(dim1), dim2(dim2) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", offset=" << offset; ss << ", dim1=" << dim1; ss << ", dim2=" << dim2; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& src, const int64_t& offset, const int64_t& dim1, const int64_t& dim2) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == src && this->offset == offset && this->dim1 == dim1 && this->dim2 == dim2);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("offset", offset);
    arguments.emplace_back("dim1", dim1);
    arguments.emplace_back("dim2", dim2);
    torch::lazy::TSOpVector diagonal_scatter_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(diagonal_scatter_out.size(), 1);
    return diagonal_scatter_out;
  }
  int64_t offset;
  int64_t dim1;
  int64_t dim2;
};

class DivTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::div); }
  DivTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(DivTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector div_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(div_out.size(), 1);
    return div_out;
  }
};
class DivTensorMode : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::div); }
  DivTensorMode(const torch::lazy::Value& self, const torch::lazy::Value& other, const ::std::optional<c10::string_view>& rounding_mode, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(DivTensorMode::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(rounding_mode)),
        rounding_mode(rounding_mode.has_value() ? ::std::make_optional(std::string(*rounding_mode)) : ::std::nullopt) {}
  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (rounding_mode.has_value()) { ss << ", rounding_mode=" << rounding_mode.value(); } else { ss << ", rounding_mode=null"; }
    return ss.str();
  }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other, const ::std::optional<c10::string_view>& rounding_mode) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other && ((!this->rounding_mode && !rounding_mode) || (this->rounding_mode && rounding_mode && *(this->rounding_mode) == *rounding_mode)));
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("rounding_mode", rounding_mode);
    torch::lazy::TSOpVector div_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(div_out.size(), 1);
    return div_out;
  }
  ::std::optional<std::string> rounding_mode;
};

class Elu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::elu); }
  Elu(const torch::lazy::Value& self, const torch::lazy::Value& alpha, const torch::lazy::Value& scale, const torch::lazy::Value& input_scale, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Elu::ClassOpKind(), OpList{self, alpha, scale, input_scale}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& alpha, const torch::lazy::Value& scale, const torch::lazy::Value& input_scale) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == alpha && operand(i++) == scale && operand(i++) == input_scale);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector elu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(elu_out.size(), 1);
    return elu_out;
  }
};

class EluBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::elu_backward); }
  EluBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& alpha, const torch::lazy::Value& scale, const torch::lazy::Value& input_scale, const bool& is_result, const torch::lazy::Value& self_or_result, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(EluBackward::ClassOpKind(), OpList{grad_output, alpha, scale, input_scale, self_or_result}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(is_result)),
        is_result(is_result) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", is_result=" << is_result; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& alpha, const torch::lazy::Value& scale, const torch::lazy::Value& input_scale, const bool& is_result, const torch::lazy::Value& self_or_result) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == alpha && operand(i++) == scale && operand(i++) == input_scale && operand(i++) == self_or_result && this->is_result == is_result);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(6); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("is_result", is_result);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector elu_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(elu_backward_out.size(), 1);
    return elu_backward_out;
  }
  bool is_result;
};

class Embedding : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::embedding); }
  Embedding(const torch::lazy::Value& weight, const torch::lazy::Value& indices, const int64_t& padding_idx, const bool& scale_grad_by_freq, const bool& sparse, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Embedding::ClassOpKind(), OpList{weight, indices}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(padding_idx, scale_grad_by_freq, sparse)),
        padding_idx(padding_idx), scale_grad_by_freq(scale_grad_by_freq), sparse(sparse) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", padding_idx=" << padding_idx; ss << ", scale_grad_by_freq=" << scale_grad_by_freq; ss << ", sparse=" << sparse; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& weight, const torch::lazy::Value& indices, const int64_t& padding_idx, const bool& scale_grad_by_freq, const bool& sparse) const {
    size_t i = 0;
    return (operand(i++) == weight && operand(i++) == indices && this->padding_idx == padding_idx && this->scale_grad_by_freq == scale_grad_by_freq && this->sparse == sparse);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("padding_idx", padding_idx);
    arguments.emplace_back("scale_grad_by_freq", scale_grad_by_freq);
    arguments.emplace_back("sparse", sparse);
    torch::lazy::TSOpVector embedding_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(embedding_out.size(), 1);
    return embedding_out;
  }
  int64_t padding_idx;
  bool scale_grad_by_freq;
  bool sparse;
};
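// Illustrative note: the `((!a && !b) || (a && b && *a == *b))` pattern used
// throughout these CanBeReused methods is a null-aware equality test spelled
// out by the code generator. It is written explicitly rather than via
// std::optional::operator== because the stored member and the incoming
// argument may be optionals of different but comparable types, e.g.
// DivTensorMode above stores ::std::optional<std::string> while callers pass
// ::std::optional<c10::string_view>. A minimal equivalent sketch:
//
//   template <typename A, typename B>
//   bool OptionalEqual(const ::std::optional<A>& a, const ::std::optional<B>& b) {
//     // true when both are empty, or both are set and compare equal
//     return (!a && !b) || (a && b && *a == *b);
//   }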
class EmbeddingDenseBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::embedding_dense_backward); }
  EmbeddingDenseBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& indices, const int64_t& num_weights, const int64_t& padding_idx, const bool& scale_grad_by_freq, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(EmbeddingDenseBackward::ClassOpKind(), OpList{grad_output, indices}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(num_weights, padding_idx, scale_grad_by_freq)),
        num_weights(num_weights), padding_idx(padding_idx), scale_grad_by_freq(scale_grad_by_freq) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", num_weights=" << num_weights; ss << ", padding_idx=" << padding_idx; ss << ", scale_grad_by_freq=" << scale_grad_by_freq; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& indices, const int64_t& num_weights, const int64_t& padding_idx, const bool& scale_grad_by_freq) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == indices && this->num_weights == num_weights && this->padding_idx == padding_idx && this->scale_grad_by_freq == scale_grad_by_freq);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("num_weights", num_weights);
    arguments.emplace_back("padding_idx", padding_idx);
    arguments.emplace_back("scale_grad_by_freq", scale_grad_by_freq);
    torch::lazy::TSOpVector embedding_dense_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(embedding_dense_backward_out.size(), 1);
    return embedding_dense_backward_out;
  }
  int64_t num_weights;
  int64_t padding_idx;
  bool scale_grad_by_freq;
};

class EqScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::eq); }
  EqScalar(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(EqScalar::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector eq_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(eq_out.size(), 1);
    return eq_out;
  }
};

class EqTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::eq); }
  EqTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(EqTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector eq_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(eq_out.size(), 1);
    return eq_out;
  }
};

class Exp : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::exp); }
  Exp(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Exp::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector exp_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(exp_out.size(), 1);
    return exp_out;
  }
};

class ExpandCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::expand_copy); }
  ExpandCopy(const torch::lazy::Value& self, const ::std::vector<int64_t>& size, const bool& implicit, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ExpandCopy::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(size, implicit)),
        size(size), implicit(implicit) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", size=" << size; ss << ", implicit=" << implicit; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& size, const bool& implicit) const {
    size_t i = 0;
    return (operand(i++) == self && this->size == size && this->implicit == implicit);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("size", size);
    kwarguments.emplace_back("implicit", implicit);
    torch::lazy::TSOpVector expand_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(expand_copy_out.size(), 1);
    return expand_copy_out;
  }
  ::std::vector<int64_t> size;
  bool implicit;
};

class Flip : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::flip); }
  Flip(const torch::lazy::Value& self, const ::std::vector<int64_t>& dims, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Flip::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dims)),
        dims(dims) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dims=" << dims; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& dims) const { size_t i = 0; return (operand(i++) == self && this->dims == dims); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dims", dims);
    torch::lazy::TSOpVector flip_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(flip_out.size(), 1);
    return flip_out;
  }
  ::std::vector<int64_t> dims;
};
class Floor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::floor); }
  Floor(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Floor::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector floor_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(floor_out.size(), 1);
    return floor_out;
  }
};

class Frac : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::frac); }
  Frac(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Frac::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector frac_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(frac_out.size(), 1);
    return frac_out;
  }
};

class Gather : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::gather); }
  Gather(const torch::lazy::Value& self, const int64_t& dim, const torch::lazy::Value& index, const bool& sparse_grad, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Gather::ClassOpKind(), OpList{self, index}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim, sparse_grad)),
        dim(dim), sparse_grad(sparse_grad) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; ss << ", sparse_grad=" << sparse_grad; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const torch::lazy::Value& index, const bool& sparse_grad) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == index && this->dim == dim && this->sparse_grad == sparse_grad);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("sparse_grad", sparse_grad);
    torch::lazy::TSOpVector gather_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(gather_out.size(), 1);
    return gather_out;
  }
  int64_t dim;
  bool sparse_grad;
};

class GeScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::ge); }
  GeScalar(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GeScalar::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector ge_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(ge_out.size(), 1);
    return ge_out;
  }
};

class GeTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::ge); }
  GeTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GeTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector ge_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(ge_out.size(), 1);
    return ge_out;
  }
};

class Gelu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::gelu); }
  Gelu(const torch::lazy::Value& self, const c10::string_view& approximate, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Gelu::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(approximate)),
        approximate(approximate) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", approximate=" << approximate; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const c10::string_view& approximate) const { size_t i = 0; return (operand(i++) == self && this->approximate == approximate); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("approximate", approximate);
    torch::lazy::TSOpVector gelu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(gelu_out.size(), 1);
    return gelu_out;
  }
  std::string approximate;
};
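// Illustrative note: positional attributes are appended to `arguments` in
// schema order, while keyword-only attributes go into `kwarguments`;
// LowerTSBuiltin then resolves both against the TorchScript builtin's schema.
// For Gelu above, whose `approximate` parameter is keyword-only in the ATen
// schema, the emitted call is roughly equivalent to this TorchScript graph
// expression (a sketch, with %self standing in for the lowered operand):
//
//   %out = aten::gelu(%self, approximate="none")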
class GeluBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::gelu_backward); }
  GeluBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const c10::string_view& approximate, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GeluBackward::ClassOpKind(), OpList{grad_output, self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(approximate)),
        approximate(approximate) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", approximate=" << approximate; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const c10::string_view& approximate) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && this->approximate == approximate);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("approximate", approximate);
    torch::lazy::TSOpVector gelu_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(gelu_backward_out.size(), 1);
    return gelu_backward_out;
  }
  std::string approximate;
};

class Glu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::glu); }
  Glu(const torch::lazy::Value& self, const int64_t& dim, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Glu::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim) const { size_t i = 0; return (operand(i++) == self && this->dim == dim); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    torch::lazy::TSOpVector glu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(glu_out.size(), 1);
    return glu_out;
  }
  int64_t dim;
};

class GluBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::glu_backward); }
  GluBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const int64_t& dim, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GluBackward::ClassOpKind(), OpList{grad_output, self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const int64_t& dim) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && this->dim == dim);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    torch::lazy::TSOpVector glu_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(glu_backward_out.size(), 1);
    return glu_backward_out;
  }
  int64_t dim;
};

class GluJvp : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::glu_jvp); }
  GluJvp(const torch::lazy::Value& glu, const torch::lazy::Value& x, const torch::lazy::Value& dx, const int64_t& dim, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GluJvp::ClassOpKind(), OpList{glu, x, dx}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& glu, const torch::lazy::Value& x, const torch::lazy::Value& dx, const int64_t& dim) const {
    size_t i = 0;
    return (operand(i++) == glu && operand(i++) == x && operand(i++) == dx && this->dim == dim);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    torch::lazy::TSOpVector glu_jvp_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(glu_jvp_out.size(), 1);
    return glu_jvp_out;
  }
  int64_t dim;
};

class GridSampler2d : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::grid_sampler_2d); }
  GridSampler2d(const torch::lazy::Value& input, const torch::lazy::Value& grid, const int64_t& interpolation_mode, const int64_t& padding_mode, const bool& align_corners, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GridSampler2d::ClassOpKind(), OpList{input, grid}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(interpolation_mode, padding_mode, align_corners)),
        interpolation_mode(interpolation_mode), padding_mode(padding_mode), align_corners(align_corners) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", interpolation_mode=" << interpolation_mode; ss << ", padding_mode=" << padding_mode; ss << ", align_corners=" << align_corners; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& input, const torch::lazy::Value& grid, const int64_t& interpolation_mode, const int64_t& padding_mode, const bool& align_corners) const {
    size_t i = 0;
    return (operand(i++) == input && operand(i++) == grid && this->interpolation_mode == interpolation_mode && this->padding_mode == padding_mode && this->align_corners == align_corners);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("interpolation_mode", interpolation_mode);
    arguments.emplace_back("padding_mode", padding_mode);
    arguments.emplace_back("align_corners", align_corners);
    torch::lazy::TSOpVector grid_sampler_2d_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(grid_sampler_2d_out.size(), 1);
    return grid_sampler_2d_out;
  }
  int64_t interpolation_mode;
  int64_t padding_mode;
  bool align_corners;
};
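// Illustrative note: nodes constructed with /* num_outputs */ 2 (such as
// GridSampler2dBackward below) expose each result as a separate output of the
// same IR node, and their Lower methods check that LowerTSBuiltin returned
// exactly that many TorchScript values. A consumer addresses each output by
// index -- a hedged sketch, with `node` standing in for a NodePtr built
// elsewhere:
//
//   torch::lazy::Value grad_input(node, /*index=*/0);
//   torch::lazy::Value grad_grid(node, /*index=*/1);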
class GridSampler2dBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::grid_sampler_2d_backward); }
  GridSampler2dBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& input, const torch::lazy::Value& grid, const int64_t& interpolation_mode, const int64_t& padding_mode, const bool& align_corners, const ::std::vector<bool>& output_mask, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GridSampler2dBackward::ClassOpKind(), OpList{grad_output, input, grid}, std::move(shapes), /* num_outputs */ 2, torch::lazy::MHash(interpolation_mode, padding_mode, align_corners, output_mask)),
        interpolation_mode(interpolation_mode), padding_mode(padding_mode), align_corners(align_corners), output_mask(output_mask) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", interpolation_mode=" << interpolation_mode; ss << ", padding_mode=" << padding_mode; ss << ", align_corners=" << align_corners; ss << ", output_mask=" << output_mask; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& input, const torch::lazy::Value& grid, const int64_t& interpolation_mode, const int64_t& padding_mode, const bool& align_corners, const ::std::vector<bool>& output_mask) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == input && operand(i++) == grid && this->interpolation_mode == interpolation_mode && this->padding_mode == padding_mode && this->align_corners == align_corners && this->output_mask == output_mask);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(7); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("interpolation_mode", interpolation_mode);
    arguments.emplace_back("padding_mode", padding_mode);
    arguments.emplace_back("align_corners", align_corners);
    arguments.emplace_back("output_mask", output_mask);
    torch::lazy::TSOpVector grid_sampler_2d_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(grid_sampler_2d_backward_out.size(), 2);
    return grid_sampler_2d_backward_out;
  }
  int64_t interpolation_mode;
  int64_t padding_mode;
  bool align_corners;
  ::std::vector<bool> output_mask;
};

class GtScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::gt); }
  GtScalar(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GtScalar::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector gt_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(gt_out.size(), 1);
    return gt_out;
  }
};

class GtTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::gt); }
  GtTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(GtTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector gt_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(gt_out.size(), 1);
    return gt_out;
  }
};

class Hardsigmoid : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::hardsigmoid); }
  Hardsigmoid(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Hardsigmoid::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector hardsigmoid_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(hardsigmoid_out.size(), 1);
    return hardsigmoid_out;
  }
};

class IndexSelect : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::index_select); }
  IndexSelect(const torch::lazy::Value& self, const int64_t& dim, const torch::lazy::Value& index, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(IndexSelect::ClassOpKind(), OpList{self, index}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const torch::lazy::Value& index) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == index && this->dim == dim);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector index_select_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(index_select_out.size(), 1);
    return index_select_out;
  }
  int64_t dim;
};
class LeScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::le); }
  LeScalar(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LeScalar::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector le_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(le_out.size(), 1);
    return le_out;
  }
};

class LeTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::le); }
  LeTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LeTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector le_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(le_out.size(), 1);
    return le_out;
  }
};

class LeakyRelu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::leaky_relu); }
  LeakyRelu(const torch::lazy::Value& self, const torch::lazy::Value& negative_slope, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LeakyRelu::ClassOpKind(), OpList{self, negative_slope}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& negative_slope) const { size_t i = 0; return (operand(i++) == self && operand(i++) == negative_slope); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector leaky_relu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(leaky_relu_out.size(), 1);
    return leaky_relu_out;
  }
};

class LeakyReluBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::leaky_relu_backward); }
  LeakyReluBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& negative_slope, const bool& self_is_result, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LeakyReluBackward::ClassOpKind(), OpList{grad_output, self, negative_slope}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(self_is_result)),
        self_is_result(self_is_result) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", self_is_result=" << self_is_result; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& negative_slope, const bool& self_is_result) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == negative_slope && this->self_is_result == self_is_result);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("self_is_result", self_is_result);
    torch::lazy::TSOpVector leaky_relu_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(leaky_relu_backward_out.size(), 1);
    return leaky_relu_backward_out;
  }
  bool self_is_result;
};

class Log : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::log); }
  Log(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Log::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector log_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(log_out.size(), 1);
    return log_out;
  }
};

class Log2 : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::log2); }
  Log2(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Log2::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector log2_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(log2_out.size(), 1);
    return log2_out;
  }
};
class LogSigmoidBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::log_sigmoid_backward); }
  LogSigmoidBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& buffer, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LogSigmoidBackward::ClassOpKind(), OpList{grad_output, self, buffer}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& buffer) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == buffer);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector log_sigmoid_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(log_sigmoid_backward_out.size(), 1);
    return log_sigmoid_backward_out;
  }
};

class LogSigmoidForward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::log_sigmoid_forward); }
  LogSigmoidForward(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LogSigmoidForward::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 2, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector log_sigmoid_forward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(log_sigmoid_forward_out.size(), 2);
    return log_sigmoid_forward_out;
  }
};

class Logdet : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::logdet); }
  Logdet(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Logdet::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector logdet_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(logdet_out.size(), 1);
    return logdet_out;
  }
};

class LtScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::lt); }
  LtScalar(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LtScalar::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector lt_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(lt_out.size(), 1);
    return lt_out;
  }
};

class LtTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::lt); }
  LtTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(LtTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const { size_t i = 0; return (operand(i++) == self && operand(i++) == other); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector lt_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(lt_out.size(), 1);
    return lt_out;
  }
};

class MaskedFillScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::masked_fill); }
  MaskedFillScalar(const torch::lazy::Value& self, const torch::lazy::Value& mask, const torch::lazy::Value& value, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(MaskedFillScalar::ClassOpKind(), OpList{self, mask, value}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& mask, const torch::lazy::Value& value) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == mask && operand(i++) == value);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector masked_fill_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(masked_fill_out.size(), 1);
    return masked_fill_out;
  }
};
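// Illustrative note: CanBeReused is the structural-equality check behind
// trace-time node reuse. After a hash lookup finds a candidate node with a
// matching OpKind and hash, the candidate is only reused if CanBeReused
// confirms every operand and attribute matches. A hedged sketch of a caller,
// with `cached_node` standing in for a NodePtr found in the cache:
//
//   if (auto* candidate = dynamic_cast<MaskedFillTensor*>(cached_node.get());
//       candidate && candidate->CanBeReused(self, mask, value)) {
//     // safe to reuse `cached_node` instead of allocating a new IR node
//   }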
class MaskedFillTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::masked_fill); }
  MaskedFillTensor(const torch::lazy::Value& self, const torch::lazy::Value& mask, const torch::lazy::Value& value, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(MaskedFillTensor::ClassOpKind(), OpList{self, mask, value}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& mask, const torch::lazy::Value& value) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == mask && operand(i++) == value);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector masked_fill_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(masked_fill_out.size(), 1);
    return masked_fill_out;
  }
};

class MaxDim : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::max); }
  MaxDim(const torch::lazy::Value& self, const int64_t& dim, const bool& keepdim, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(MaxDim::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 2, torch::lazy::MHash(dim, keepdim)),
        dim(dim), keepdim(keepdim) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; ss << ", keepdim=" << keepdim; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const bool& keepdim) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim && this->keepdim == keepdim);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("keepdim", keepdim);
    torch::lazy::TSOpVector max_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(max_out.size(), 2);
    return max_out;
  }
  int64_t dim;
  bool keepdim;
};

class Max : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::max); }
  Max(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Max::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector max_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(max_out.size(), 1);
    return max_out;
  }
};

class MaxPool2dWithIndices : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::max_pool2d_with_indices); }
  MaxPool2dWithIndices(const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& ceil_mode, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(MaxPool2dWithIndices::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 2, torch::lazy::MHash(kernel_size, stride, padding, dilation, ceil_mode)),
        kernel_size(kernel_size), stride(stride), padding(padding), dilation(dilation), ceil_mode(ceil_mode) {}
  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", kernel_size=" << kernel_size; ss << ", stride=" << stride; ss << ", padding=" << padding; ss << ", dilation=" << dilation; ss << ", ceil_mode=" << ceil_mode; return ss.str(); }
  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& ceil_mode) const {
    size_t i = 0;
    return (operand(i++) == self && this->kernel_size == kernel_size && this->stride == stride && this->padding == padding && this->dilation == dilation && this->ceil_mode == ceil_mode);
  }
  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(6); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("kernel_size", kernel_size);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back("padding", padding);
    arguments.emplace_back("dilation", dilation);
    arguments.emplace_back("ceil_mode", ceil_mode);
    torch::lazy::TSOpVector max_pool2d_with_indices_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(max_pool2d_with_indices_out.size(), 2);
    return max_pool2d_with_indices_out;
  }
  ::std::vector<int64_t> kernel_size;
  ::std::vector<int64_t> stride;
  ::std::vector<int64_t> padding;
  ::std::vector<int64_t> dilation;
  bool ceil_mode;
};
class MaxPool2dWithIndicesBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::max_pool2d_with_indices_backward); }

  MaxPool2dWithIndicesBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& ceil_mode, const torch::lazy::Value& indices, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(MaxPool2dWithIndicesBackward::ClassOpKind(), OpList{grad_output, self, indices}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(kernel_size, stride, padding, dilation, ceil_mode)),
        kernel_size(kernel_size), stride(stride), padding(padding), dilation(dilation), ceil_mode(ceil_mode) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", kernel_size=" << kernel_size;
    ss << ", stride=" << stride;
    ss << ", padding=" << padding;
    ss << ", dilation=" << dilation;
    ss << ", ceil_mode=" << ceil_mode;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const ::std::vector<int64_t>& kernel_size, const ::std::vector<int64_t>& stride, const ::std::vector<int64_t>& padding, const ::std::vector<int64_t>& dilation, const bool& ceil_mode, const torch::lazy::Value& indices) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == indices && this->kernel_size == kernel_size && this->stride == stride && this->padding == padding && this->dilation == dilation && this->ceil_mode == ceil_mode);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(8); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("kernel_size", kernel_size);
    arguments.emplace_back("stride", stride);
    arguments.emplace_back("padding", padding);
    arguments.emplace_back("dilation", dilation);
    arguments.emplace_back("ceil_mode", ceil_mode);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector max_pool2d_with_indices_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(max_pool2d_with_indices_backward_out.size(), 1);
    return max_pool2d_with_indices_backward_out;
  }

  ::std::vector<int64_t> kernel_size;
  ::std::vector<int64_t> stride;
  ::std::vector<int64_t> padding;
  ::std::vector<int64_t> dilation;
  bool ceil_mode;
};

class Maximum : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::maximum); }

  Maximum(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Maximum::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector maximum_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(maximum_out.size(), 1);
    return maximum_out;
  }
};

class Mean : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::mean); }

  Mean(const torch::lazy::Value& self, const ::std::optional<at::ScalarType>& dtype, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Mean::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dtype)),
        dtype(dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (dtype.has_value()) { ss << ", dtype=" << dtype.value(); } else { ss << ", dtype=null"; }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<at::ScalarType>& dtype) const {
    size_t i = 0;
    return (operand(i++) == self && ((!this->dtype && !dtype) || (this->dtype && dtype && *(this->dtype) == *dtype)));
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("dtype", dtype);
    torch::lazy::TSOpVector mean_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(mean_out.size(), 1);
    return mean_out;
  }

  ::std::optional<at::ScalarType> dtype;
};
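// Optional scalar attributes such as Mean's `dtype` follow a three-part
// convention: they participate in the node hash via MHash, CanBeReused
// compares them with an explicit "both empty or both equal" check, and
// Lower() passes them as a keyword argument so TorchScript schema matching
// resolves the right overload even when the value is nullopt.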
class MeanDim : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::mean); }

  MeanDim(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim, const bool& keepdim, const ::std::optional<at::ScalarType>& dtype, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(MeanDim::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim, keepdim, dtype)),
        dim(dim), keepdim(keepdim), dtype(dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (dim.has_value()) { ss << ", dim=" << dim.value(); } else { ss << ", dim=null"; }
    ss << ", keepdim=" << keepdim;
    if (dtype.has_value()) { ss << ", dtype=" << dtype.value(); } else { ss << ", dtype=null"; }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim, const bool& keepdim, const ::std::optional<at::ScalarType>& dtype) const {
    size_t i = 0;
    return (operand(i++) == self && ((!this->dim && !dim) || (this->dim && dim && *(this->dim) == *dim)) && this->keepdim == keepdim && ((!this->dtype && !dtype) || (this->dtype && dtype && *(this->dtype) == *dtype)));
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("keepdim", keepdim);
    kwarguments.emplace_back("dtype", dtype);
    torch::lazy::TSOpVector mean_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(mean_out.size(), 1);
    return mean_out;
  }

  ::std::optional<::std::vector<int64_t>> dim;
  bool keepdim;
  ::std::optional<at::ScalarType> dtype;
};

class Min : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::min); }

  Min(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Min::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector min_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(min_out.size(), 1);
    return min_out;
  }
};
class Minimum : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::minimum); }

  Minimum(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Minimum::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector minimum_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(minimum_out.size(), 1);
    return minimum_out;
  }
};

class Mm : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::mm); }

  Mm(const torch::lazy::Value& self, const torch::lazy::Value& mat2, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Mm::ClassOpKind(), OpList{self, mat2}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& mat2) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == mat2);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector mm_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(mm_out.size(), 1);
    return mm_out;
  }
};

class MulTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::mul); }

  MulTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(MulTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector mul_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(mul_out.size(), 1);
    return mul_out;
  }
};

class Mv : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::mv); }

  Mv(const torch::lazy::Value& self, const torch::lazy::Value& vec, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Mv::ClassOpKind(), OpList{self, vec}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& vec) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == vec);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector mv_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(mv_out.size(), 1);
    return mv_out;
  }
};
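// Binary tensor ops like Mm, MulTensor and Mv carry no scalar attributes, so
// their hash contribution is the bare MHash() and node reuse is keyed purely
// on the operand values (plus the op kind and shapes hashed by TsNode itself).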
class NativeBatchNorm : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::native_batch_norm); }

  NativeBatchNorm(const torch::lazy::Value& input, const ::std::optional<torch::lazy::Value>& weight, const ::std::optional<torch::lazy::Value>& bias, const ::std::optional<torch::lazy::Value>& running_mean, const ::std::optional<torch::lazy::Value>& running_var, const bool& training, const double& momentum, const double& eps, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(NativeBatchNorm::ClassOpKind(), OpList{input, weight.value_or(kNullValue), bias.value_or(kNullValue), running_mean.value_or(kNullValue), running_var.value_or(kNullValue)}, std::move(shapes), /* num_outputs */ 3, torch::lazy::MHash(training, momentum, eps)),
        training(training), momentum(momentum), eps(eps) {
    has_weight = !!weight;
    has_bias = !!bias;
    has_running_mean = !!running_mean;
    has_running_var = !!running_var;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", training=" << training;
    ss << ", momentum=" << momentum;
    ss << ", eps=" << eps;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& input, const ::std::optional<torch::lazy::Value>& weight, const ::std::optional<torch::lazy::Value>& bias, const ::std::optional<torch::lazy::Value>& running_mean, const ::std::optional<torch::lazy::Value>& running_var, const bool& training, const double& momentum, const double& eps) const {
    size_t i = 0;
    return (operand(i++) == input && nullable_operand(i++) == weight.value_or(kNullValue) && nullable_operand(i++) == bias.value_or(kNullValue) && nullable_operand(i++) == running_mean.value_or(kNullValue) && nullable_operand(i++) == running_var.value_or(kNullValue) && this->training == training && this->momentum == momentum && this->eps == eps);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(8); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_bias ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_running_mean ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_running_var ? loctx->GetOutputOp(operand(i++)) : nullptr);
loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back("training", training); arguments.emplace_back("momentum", momentum); arguments.emplace_back("eps", eps); torch::lazy::TSOpVector native_batch_norm_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(native_batch_norm_out.size(), 3); return native_batch_norm_out; } bool training; double momentum; double eps; bool has_weight: 1; bool has_bias: 1; bool has_running_mean: 1; bool has_running_var: 1; }; class NativeBatchNormBackward : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::native_batch_norm_backward); } NativeBatchNormBackward(const torch::lazy::Value& grad_out, const torch::lazy::Value& input, const ::std::optional& weight, const ::std::optional& running_mean, const ::std::optional& running_var, const ::std::optional& save_mean, const ::std::optional& save_invstd, const bool& train, const double& eps, const ::std::vector& output_mask, std::vector&& shapes) : TsNode( NativeBatchNormBackward::ClassOpKind(), OpList{grad_out, input, weight.value_or(kNullValue), running_mean.value_or(kNullValue), running_var.value_or(kNullValue), save_mean.value_or(kNullValue), save_invstd.value_or(kNullValue)}, std::move(shapes), /* num_outputs */ 3, torch::lazy::MHash(train, eps, output_mask)), train(train), eps(eps), output_mask(output_mask) { has_weight = !!weight; has_running_mean = !!running_mean; has_running_var = !!running_var; has_save_mean = !!save_mean; has_save_invstd = !!save_invstd; } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", train=" << train; ss << ", eps=" << eps; ss << ", output_mask=" << output_mask; return ss.str(); } bool CanBeReused(const torch::lazy::Value& grad_out, const torch::lazy::Value& input, const ::std::optional& weight, const ::std::optional& running_mean, const ::std::optional& running_var, const ::std::optional& save_mean, const ::std::optional& save_invstd, const bool& train, const double& eps, const ::std::vector& output_mask) const { size_t i = 0; return (operand(i++) == grad_out && operand(i++) == input && nullable_operand(i++) == weight.value_or(kNullValue) && nullable_operand(i++) == running_mean.value_or(kNullValue) && nullable_operand(i++) == running_var.value_or(kNullValue) && nullable_operand(i++) == save_mean.value_or(kNullValue) && nullable_operand(i++) == save_invstd.value_or(kNullValue) && this->train == train && this->eps == eps && this->output_mask == output_mask); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(10); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back(has_running_mean ? loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back(has_running_var ? loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back(has_save_mean ? loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back(has_save_invstd ? 
loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back("train", train); arguments.emplace_back("eps", eps); arguments.emplace_back("output_mask", output_mask); torch::lazy::TSOpVector native_batch_norm_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(native_batch_norm_backward_out.size(), 3); return native_batch_norm_backward_out; } bool train; double eps; ::std::vector output_mask; bool has_weight: 1; bool has_running_mean: 1; bool has_running_var: 1; bool has_save_mean: 1; bool has_save_invstd: 1; }; class NativeDropout : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::native_dropout); } NativeDropout(const torch::lazy::Value& input, const double& p, const ::std::optional& train, std::vector&& shapes) : TsNode( NativeDropout::ClassOpKind(), OpList{input}, std::move(shapes), /* num_outputs */ 2, torch::lazy::MHash(p, train)), p(p), train(train) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", p=" << p; if (train.has_value()) { ss << ", train=" << train.value(); } else { ss << ", train=null"; } return ss.str(); } bool CanBeReused(const torch::lazy::Value& input, const double& p, const ::std::optional& train) const { size_t i = 0; return (operand(i++) == input && this->p == p && ((!this->train&&!train) || (this->train&&train && *(this->train) == *train))); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(3); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("p", p); arguments.emplace_back("train", train); torch::lazy::TSOpVector native_dropout_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(native_dropout_out.size(), 2); return native_dropout_out; } double p; ::std::optional train; }; class NativeDropoutBackward : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::native_dropout_backward); } NativeDropoutBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& mask, const double& scale, std::vector&& shapes) : TsNode( NativeDropoutBackward::ClassOpKind(), OpList{grad_output, mask}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(scale)), scale(scale) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", scale=" << scale; return ss.str(); } bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& mask, const double& scale) const { size_t i = 0; return (operand(i++) == grad_output && operand(i++) == mask && this->scale == scale); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(3); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("scale", scale); torch::lazy::TSOpVector native_dropout_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(native_dropout_backward_out.size(), 1); return native_dropout_backward_out; } double scale; }; class NativeLayerNorm : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return 
class NativeLayerNorm : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::native_layer_norm); }

  NativeLayerNorm(const torch::lazy::Value& input, const ::std::vector<int64_t>& normalized_shape, const ::std::optional<torch::lazy::Value>& weight, const ::std::optional<torch::lazy::Value>& bias, const double& eps, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(NativeLayerNorm::ClassOpKind(), OpList{input, weight.value_or(kNullValue), bias.value_or(kNullValue)}, std::move(shapes), /* num_outputs */ 3, torch::lazy::MHash(normalized_shape, eps)),
        normalized_shape(normalized_shape), eps(eps) {
    has_weight = !!weight;
    has_bias = !!bias;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", normalized_shape=" << normalized_shape;
    ss << ", eps=" << eps;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& input, const ::std::vector<int64_t>& normalized_shape, const ::std::optional<torch::lazy::Value>& weight, const ::std::optional<torch::lazy::Value>& bias, const double& eps) const {
    size_t i = 0;
    return (operand(i++) == input && nullable_operand(i++) == weight.value_or(kNullValue) && nullable_operand(i++) == bias.value_or(kNullValue) && this->normalized_shape == normalized_shape && this->eps == eps);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("normalized_shape", normalized_shape);
    arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_bias ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back("eps", eps);
    torch::lazy::TSOpVector native_layer_norm_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(native_layer_norm_out.size(), 3);
    return native_layer_norm_out;
  }

  ::std::vector<int64_t> normalized_shape;
  double eps;
  bool has_weight : 1;
  bool has_bias : 1;
};
class NativeLayerNormBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::native_layer_norm_backward); }

  NativeLayerNormBackward(const torch::lazy::Value& grad_out, const torch::lazy::Value& input, const ::std::vector<int64_t>& normalized_shape, const torch::lazy::Value& mean, const torch::lazy::Value& rstd, const ::std::optional<torch::lazy::Value>& weight, const ::std::optional<torch::lazy::Value>& bias, const ::std::vector<bool>& output_mask, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(NativeLayerNormBackward::ClassOpKind(), OpList{grad_out, input, mean, rstd, weight.value_or(kNullValue), bias.value_or(kNullValue)}, std::move(shapes), /* num_outputs */ 3, torch::lazy::MHash(normalized_shape, output_mask)),
        normalized_shape(normalized_shape), output_mask(output_mask) {
    has_weight = !!weight;
    has_bias = !!bias;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", normalized_shape=" << normalized_shape;
    ss << ", output_mask=" << output_mask;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_out, const torch::lazy::Value& input, const ::std::vector<int64_t>& normalized_shape, const torch::lazy::Value& mean, const torch::lazy::Value& rstd, const ::std::optional<torch::lazy::Value>& weight, const ::std::optional<torch::lazy::Value>& bias, const ::std::vector<bool>& output_mask) const {
    size_t i = 0;
    return (operand(i++) == grad_out && operand(i++) == input && operand(i++) == mean && operand(i++) == rstd && nullable_operand(i++) == weight.value_or(kNullValue) && nullable_operand(i++) == bias.value_or(kNullValue) && this->normalized_shape == normalized_shape && this->output_mask == output_mask);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(8); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("normalized_shape", normalized_shape);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_bias ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back("output_mask", output_mask);
    torch::lazy::TSOpVector native_layer_norm_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(native_layer_norm_backward_out.size(), 3);
    return native_layer_norm_backward_out;
  }

  ::std::vector<int64_t> normalized_shape;
  ::std::vector<bool> output_mask;
  bool has_weight : 1;
  bool has_bias : 1;
};

class NeScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::ne); }

  NeScalar(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(NeScalar::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector ne_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(ne_out.size(), 1);
    return ne_out;
  }
};

class NeTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::ne); }

  NeTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(NeTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector ne_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(ne_out.size(), 1);
    return ne_out;
  }
};
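// NeScalar and NeTensor intentionally share OpKind(at::aten::ne): by the time
// a node is constructed, the scalar overload's `other` has already been
// wrapped as a lazy Value, so both variants hash, compare and lower through
// exactly the same two-operand path.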
class Neg : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::neg); }

  Neg(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Neg::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector neg_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(neg_out.size(), 1);
    return neg_out;
  }
};

class NllLoss2dBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::nll_loss2d_backward); }

  NllLoss2dBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional<torch::lazy::Value>& weight, const int64_t& reduction, const int64_t& ignore_index, const torch::lazy::Value& total_weight, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(NllLoss2dBackward::ClassOpKind(), OpList{grad_output, self, target, weight.value_or(kNullValue), total_weight}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(reduction, ignore_index)),
        reduction(reduction), ignore_index(ignore_index) {
    has_weight = !!weight;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", reduction=" << reduction;
    ss << ", ignore_index=" << ignore_index;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional<torch::lazy::Value>& weight, const int64_t& reduction, const int64_t& ignore_index, const torch::lazy::Value& total_weight) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == target && nullable_operand(i++) == weight.value_or(kNullValue) && operand(i++) == total_weight && this->reduction == reduction && this->ignore_index == ignore_index);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(7); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back("reduction", reduction);
    arguments.emplace_back("ignore_index", ignore_index);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector nll_loss2d_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(nll_loss2d_backward_out.size(), 1);
    return nll_loss2d_backward_out;
  }

  int64_t reduction;
  int64_t ignore_index;
  bool has_weight : 1;
};
loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back("reduction", reduction); arguments.emplace_back("ignore_index", ignore_index); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); torch::lazy::TSOpVector nll_loss2d_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(nll_loss2d_backward_out.size(), 1); return nll_loss2d_backward_out; } int64_t reduction; int64_t ignore_index; bool has_weight: 1; }; class NllLoss2dForward : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::nll_loss2d_forward); } NllLoss2dForward(const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional& weight, const int64_t& reduction, const int64_t& ignore_index, std::vector&& shapes) : TsNode( NllLoss2dForward::ClassOpKind(), OpList{self, target, weight.value_or(kNullValue)}, std::move(shapes), /* num_outputs */ 2, torch::lazy::MHash(reduction, ignore_index)), reduction(reduction), ignore_index(ignore_index) { has_weight = !!weight; } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", reduction=" << reduction; ss << ", ignore_index=" << ignore_index; return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional& weight, const int64_t& reduction, const int64_t& ignore_index) const { size_t i = 0; return (operand(i++) == self && operand(i++) == target && nullable_operand(i++) == weight.value_or(kNullValue) && this->reduction == reduction && this->ignore_index == ignore_index); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(5); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(has_weight ? 
loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back("reduction", reduction); arguments.emplace_back("ignore_index", ignore_index); torch::lazy::TSOpVector nll_loss2d_forward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(nll_loss2d_forward_out.size(), 2); return nll_loss2d_forward_out; } int64_t reduction; int64_t ignore_index; bool has_weight: 1; }; class NllLossBackward : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::nll_loss_backward); } NllLossBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional& weight, const int64_t& reduction, const int64_t& ignore_index, const torch::lazy::Value& total_weight, std::vector&& shapes) : TsNode( NllLossBackward::ClassOpKind(), OpList{grad_output, self, target, weight.value_or(kNullValue), total_weight}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(reduction, ignore_index)), reduction(reduction), ignore_index(ignore_index) { has_weight = !!weight; } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", reduction=" << reduction; ss << ", ignore_index=" << ignore_index; return ss.str(); } bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional& weight, const int64_t& reduction, const int64_t& ignore_index, const torch::lazy::Value& total_weight) const { size_t i = 0; return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == target && nullable_operand(i++) == weight.value_or(kNullValue) && operand(i++) == total_weight && this->reduction == reduction && this->ignore_index == ignore_index); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(7); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(has_weight ? 
loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back("reduction", reduction); arguments.emplace_back("ignore_index", ignore_index); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); torch::lazy::TSOpVector nll_loss_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(nll_loss_backward_out.size(), 1); return nll_loss_backward_out; } int64_t reduction; int64_t ignore_index; bool has_weight: 1; }; class NllLossForward : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::nll_loss_forward); } NllLossForward(const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional& weight, const int64_t& reduction, const int64_t& ignore_index, std::vector&& shapes) : TsNode( NllLossForward::ClassOpKind(), OpList{self, target, weight.value_or(kNullValue)}, std::move(shapes), /* num_outputs */ 2, torch::lazy::MHash(reduction, ignore_index)), reduction(reduction), ignore_index(ignore_index) { has_weight = !!weight; } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", reduction=" << reduction; ss << ", ignore_index=" << ignore_index; return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& target, const ::std::optional& weight, const int64_t& reduction, const int64_t& ignore_index) const { size_t i = 0; return (operand(i++) == self && operand(i++) == target && nullable_operand(i++) == weight.value_or(kNullValue) && this->reduction == reduction && this->ignore_index == ignore_index); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(5); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(has_weight ? 
loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back("reduction", reduction); arguments.emplace_back("ignore_index", ignore_index); torch::lazy::TSOpVector nll_loss_forward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(nll_loss_forward_out.size(), 2); return nll_loss_forward_out; } int64_t reduction; int64_t ignore_index; bool has_weight: 1; }; class Nonzero : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::nonzero); } Nonzero(const torch::lazy::Value& self, std::vector&& shapes) : TsNode( Nonzero::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); } bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(1); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); torch::lazy::TSOpVector nonzero_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(nonzero_out.size(), 1); return nonzero_out; } }; class NormScalaroptDim : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::norm); } NormScalaroptDim(const torch::lazy::Value& self, const ::std::optional& p, const ::std::vector& dim, const bool& keepdim, std::vector&& shapes) : TsNode( NormScalaroptDim::ClassOpKind(), OpList{self, p.value_or(kNullValue)}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim, keepdim)), dim(dim), keepdim(keepdim) { has_p = !!p; } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; ss << ", keepdim=" << keepdim; return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const ::std::optional& p, const ::std::vector& dim, const bool& keepdim) const { size_t i = 0; return (operand(i++) == self && nullable_operand(i++) == p.value_or(kNullValue) && this->dim == dim && this->keepdim == keepdim); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(4); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(has_p ? 
loctx->GetOutputOp(operand(i++)) : nullptr); arguments.emplace_back("dim", dim); arguments.emplace_back("keepdim", keepdim); torch::lazy::TSOpVector norm_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(norm_out.size(), 1); return norm_out; } ::std::vector dim; bool keepdim; bool has_p: 1; }; class NormalFunctional : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::normal_functional); } NormalFunctional(const torch::lazy::Value& self, const double& mean, const double& std, const ::std::optional& generator, std::vector&& shapes) : TsNode( NormalFunctional::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(mean, std, generator)), mean(mean), std(std), generator(generator) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", mean=" << mean; ss << ", std=" << std; if (generator.has_value()) { ss << ", generator=" << "torch.Generator()"; } else { ss << ", generator=null"; } return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const double& mean, const double& std, const ::std::optional& generator) const { size_t i = 0; return (operand(i++) == self && this->mean == mean && this->std == std && ((!this->generator&&!generator) || (this->generator&&generator && *(this->generator) == *generator))); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(3); kwarguments.reserve(1); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("mean", mean); arguments.emplace_back("std", std); kwarguments.emplace_back("generator", generator); torch::lazy::TSOpVector normal_functional_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(normal_functional_out.size(), 1); return normal_functional_out; } double mean; double std; ::std::optional generator; }; class PermuteCopy : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::permute_copy); } PermuteCopy(const torch::lazy::Value& self, const ::std::vector& dims, std::vector&& shapes) : TsNode( PermuteCopy::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dims)), dims(dims) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dims=" << dims; return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const ::std::vector& dims) const { size_t i = 0; return (operand(i++) == self && this->dims == dims); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(2); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("dims", dims); torch::lazy::TSOpVector permute_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(permute_copy_out.size(), 1); return permute_copy_out; } ::std::vector dims; }; class PowTensorTensor : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::pow); } PowTensorTensor(const torch::lazy::Value& self, const torch::lazy::Value& exponent, std::vector&& shapes) : TsNode( PowTensorTensor::ClassOpKind(), OpList{self, exponent}, 
class PowTensorTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::pow); }

  PowTensorTensor(const torch::lazy::Value& self, const torch::lazy::Value& exponent, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(PowTensorTensor::ClassOpKind(), OpList{self, exponent}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& exponent) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == exponent);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector pow_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(pow_out.size(), 1);
    return pow_out;
  }
};

class PowTensorScalar : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::pow); }

  PowTensorScalar(const torch::lazy::Value& self, const torch::lazy::Value& exponent, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(PowTensorScalar::ClassOpKind(), OpList{self, exponent}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& exponent) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == exponent);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector pow_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(pow_out.size(), 1);
    return pow_out;
  }
};

class RandomFrom : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::random); }

  RandomFrom(const torch::lazy::Value& self, const int64_t& from, const ::std::optional<int64_t>& to, const ::std::optional<at::Generator>& generator, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(RandomFrom::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(from, to, generator)),
        from(from), to(to), generator(generator) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", from=" << from;
    if (to.has_value()) { ss << ", to=" << to.value(); } else { ss << ", to=null"; }
    if (generator.has_value()) { ss << ", generator=" << "torch.Generator()"; } else { ss << ", generator=null"; }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& from, const ::std::optional<int64_t>& to, const ::std::optional<at::Generator>& generator) const {
    size_t i = 0;
    return (operand(i++) == self && this->from == from && ((!this->to && !to) || (this->to && to && *(this->to) == *to)) && ((!this->generator && !generator) || (this->generator && generator && *(this->generator) == *generator)));
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("from", from);
    arguments.emplace_back("to", to);
    kwarguments.emplace_back("generator", generator);
    torch::lazy::TSOpVector random_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(random_out.size(), 1);
    return random_out;
  }

  int64_t from;
  ::std::optional<int64_t> to;
  ::std::optional<at::Generator> generator;
};
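// RNG nodes carry an ::std::optional<at::Generator>. Because a Generator has
// no stable textual form, ToString() prints the placeholder
// "torch.Generator()" when one is set; the generator still contributes to the
// node hash and is forwarded to the builtin as a keyword argument.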
class RandomTo : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::random); }

  RandomTo(const torch::lazy::Value& self, const int64_t& to, const ::std::optional<at::Generator>& generator, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(RandomTo::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(to, generator)),
        to(to), generator(generator) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", to=" << to;
    if (generator.has_value()) { ss << ", generator=" << "torch.Generator()"; } else { ss << ", generator=null"; }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& to, const ::std::optional<at::Generator>& generator) const {
    size_t i = 0;
    return (operand(i++) == self && this->to == to && ((!this->generator && !generator) || (this->generator && generator && *(this->generator) == *generator)));
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("to", to);
    kwarguments.emplace_back("generator", generator);
    torch::lazy::TSOpVector random_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(random_out.size(), 1);
    return random_out;
  }

  int64_t to;
  ::std::optional<at::Generator> generator;
};

class Random : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::random); }

  Random(const torch::lazy::Value& self, const ::std::optional<at::Generator>& generator, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Random::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(generator)),
        generator(generator) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (generator.has_value()) { ss << ", generator=" << "torch.Generator()"; } else { ss << ", generator=null"; }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<at::Generator>& generator) const {
    size_t i = 0;
    return (operand(i++) == self && ((!this->generator && !generator) || (this->generator && generator && *(this->generator) == *generator)));
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("generator", generator);
    torch::lazy::TSOpVector random_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(random_out.size(), 1);
    return random_out;
  }

  ::std::optional<at::Generator> generator;
};
class Reciprocal : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::reciprocal); }

  Reciprocal(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Reciprocal::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector reciprocal_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(reciprocal_out.size(), 1);
    return reciprocal_out;
  }
};

class Relu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::relu); }

  Relu(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Relu::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector relu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(relu_out.size(), 1);
    return relu_out;
  }
};

class RemainderTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::remainder); }

  RemainderTensor(const torch::lazy::Value& self, const torch::lazy::Value& other, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(RemainderTensor::ClassOpKind(), OpList{self, other}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector remainder_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(remainder_out.size(), 1);
    return remainder_out;
  }
};
arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("repeats", repeats); torch::lazy::TSOpVector repeat_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(repeat_out.size(), 1); return repeat_out; } ::std::vector repeats; }; class Rsqrt : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::rsqrt); } Rsqrt(const torch::lazy::Value& self, std::vector&& shapes) : TsNode( Rsqrt::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); } bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(1); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); torch::lazy::TSOpVector rsqrt_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(rsqrt_out.size(), 1); return rsqrt_out; } }; class ScatterAdd : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::scatter_add); } ScatterAdd(const torch::lazy::Value& self, const int64_t& dim, const torch::lazy::Value& index, const torch::lazy::Value& src, std::vector&& shapes) : TsNode( ScatterAdd::ClassOpKind(), OpList{self, index, src}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim)), dim(dim) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const torch::lazy::Value& index, const torch::lazy::Value& src) const { size_t i = 0; return (operand(i++) == self && operand(i++) == index && operand(i++) == src && this->dim == dim); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(4); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("dim", dim); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back(loctx->GetOutputOp(operand(i++))); torch::lazy::TSOpVector scatter_add_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(scatter_add_out.size(), 1); return scatter_add_out; } int64_t dim; }; class SelectCopyInt : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::select_copy); } SelectCopyInt(const torch::lazy::Value& self, const int64_t& dim, const int64_t& index, std::vector&& shapes) : TsNode( SelectCopyInt::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim, index)), dim(dim), index(index) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", dim=" << dim; ss << ", index=" << index; return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const int64_t& index) const { size_t i = 0; return (operand(i++) == self && this->dim == dim && this->index == index); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const 
class SelectCopyInt : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::select_copy); }

  SelectCopyInt(const torch::lazy::Value& self, const int64_t& dim, const int64_t& index, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SelectCopyInt::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim, index)),
        dim(dim), index(index) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    ss << ", index=" << index;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const int64_t& index) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim && this->index == index);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("index", index);
    torch::lazy::TSOpVector select_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(select_copy_out.size(), 1);
    return select_copy_out;
  }

  int64_t dim;
  int64_t index;
};

class SelectScatter : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::select_scatter); }

  SelectScatter(const torch::lazy::Value& self, const torch::lazy::Value& src, const int64_t& dim, const int64_t& index, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SelectScatter::ClassOpKind(), OpList{self, src}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(dim, index)),
        dim(dim), index(index) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    ss << ", index=" << index;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& src, const int64_t& dim, const int64_t& index) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == src && this->dim == dim && this->index == index);
  }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("index", index);
    torch::lazy::TSOpVector select_scatter_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(select_scatter_out.size(), 1);
    return select_scatter_out;
  }

  int64_t dim;
  int64_t index;
};

class Selu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::selu); }

  Selu(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Selu::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); return ss.str(); }

  bool CanBeReused(const torch::lazy::Value& self) const { size_t i = 0; return (operand(i++) == self); }

  torch::lazy::TSOpVector Lower(std::shared_ptr<torch::jit::GraphFunction> function, torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments; std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1); kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector selu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(selu_out.size(), 1);
    return selu_out;
  }
};
class Sigmoid : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::sigmoid);
  }

  Sigmoid(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Sigmoid::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector sigmoid_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(sigmoid_out.size(), 1);
    return sigmoid_out;
  }
};

class SigmoidBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(c10::Symbol::fromQualString("aten::sigmoid_backward"));
  }

  SigmoidBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& output,
                  std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SigmoidBackward::ClassOpKind(), OpList{grad_output, output}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& output) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == output);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector sigmoid_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(sigmoid_backward_out.size(), 1);
    return sigmoid_backward_out;
  }
};

class Silu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::silu);
  }

  Silu(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Silu::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector silu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(silu_out.size(), 1);
    return silu_out;
  }
};
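// NOTE: SigmoidBackward above spells its OpKind as
// c10::Symbol::fromQualString("aten::sigmoid_backward") rather than a static
// at::aten::* symbol; presumably the codegen falls back to the qualified
// string whenever no interned at::aten alias exists for the op. Both
// spellings intern to the same Symbol, e.g.:
//
//   TORCH_CHECK(c10::Symbol::fromQualString("aten::sigmoid") == at::aten::sigmoid);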
class SliceCopyTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::slice_copy);
  }

  SliceCopyTensor(const torch::lazy::Value& self, const int64_t& dim,
                  const ::std::optional<torch::lazy::Value>& start,
                  const ::std::optional<torch::lazy::Value>& end, const torch::lazy::Value& step,
                  std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SliceCopyTensor::ClassOpKind(),
               OpList{self, start.value_or(kNullValue), end.value_or(kNullValue), step},
               std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {
    has_start = !!start;
    has_end = !!end;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim,
                   const ::std::optional<torch::lazy::Value>& start,
                   const ::std::optional<torch::lazy::Value>& end, const torch::lazy::Value& step) const {
    size_t i = 0;
    return (operand(i++) == self && nullable_operand(i++) == start.value_or(kNullValue) &&
            nullable_operand(i++) == end.value_or(kNullValue) && operand(i++) == step && this->dim == dim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back(has_start ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_end ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector slice_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(slice_copy_out.size(), 1);
    return slice_copy_out;
  }

  int64_t dim;
  bool has_start : 1;
  bool has_end : 1;
};

class SliceScatter : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::slice_scatter);
  }

  SliceScatter(const torch::lazy::Value& self, const torch::lazy::Value& src, const int64_t& dim,
               const ::std::optional<torch::lazy::Value>& start,
               const ::std::optional<torch::lazy::Value>& end, const torch::lazy::Value& step,
               std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SliceScatter::ClassOpKind(),
               OpList{self, src, start.value_or(kNullValue), end.value_or(kNullValue), step},
               std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {
    has_start = !!start;
    has_end = !!end;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& src, const int64_t& dim,
                   const ::std::optional<torch::lazy::Value>& start,
                   const ::std::optional<torch::lazy::Value>& end, const torch::lazy::Value& step) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == src &&
            nullable_operand(i++) == start.value_or(kNullValue) &&
            nullable_operand(i++) == end.value_or(kNullValue) && operand(i++) == step && this->dim == dim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(6);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back(has_start ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(has_end ? loctx->GetOutputOp(operand(i++)) : nullptr);
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector slice_scatter_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(slice_scatter_out.size(), 1);
    return slice_scatter_out;
  }

  int64_t dim;
  bool has_start : 1;
  bool has_end : 1;
};
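// NOTE: SliceCopyTensor and SliceScatter take optional Value operands. A
// nullopt start/end is stored as kNullValue so the operand list keeps a fixed
// layout and the hash stays order-sensitive (see the kNullValue comment at
// the top of this file), while the has_start/has_end bit-fields record which
// operands are real; lowering then emits nullptr for the missing ones. A
// hedged construction sketch, assuming torch::lazy::MakeNode and placeholder
// self/step values:
//
//   // slice_copy(self, dim=0, start=None, end=None, step=step)
//   auto node = torch::lazy::MakeNode<SliceCopyTensor>(
//       self, /*dim=*/0, /*start=*/::std::nullopt, /*end=*/::std::nullopt,
//       step, std::move(shapes));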
class SmoothL1Loss : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::smooth_l1_loss);
  }

  SmoothL1Loss(const torch::lazy::Value& self, const torch::lazy::Value& target, const int64_t& reduction,
               const double& beta, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SmoothL1Loss::ClassOpKind(), OpList{self, target}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(reduction, beta)),
        reduction(reduction),
        beta(beta) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", reduction=" << reduction;
    ss << ", beta=" << beta;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& target,
                   const int64_t& reduction, const double& beta) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == target && this->reduction == reduction &&
            this->beta == beta);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("reduction", reduction);
    arguments.emplace_back("beta", beta);
    torch::lazy::TSOpVector smooth_l1_loss_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(smooth_l1_loss_out.size(), 1);
    return smooth_l1_loss_out;
  }

  int64_t reduction;
  double beta;
};

class SmoothL1LossBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::smooth_l1_loss_backward);
  }

  SmoothL1LossBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self,
                       const torch::lazy::Value& target, const int64_t& reduction, const double& beta,
                       std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SmoothL1LossBackward::ClassOpKind(), OpList{grad_output, self, target}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(reduction, beta)),
        reduction(reduction),
        beta(beta) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", reduction=" << reduction;
    ss << ", beta=" << beta;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self,
                   const torch::lazy::Value& target, const int64_t& reduction, const double& beta) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == target &&
            this->reduction == reduction && this->beta == beta);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("reduction", reduction);
    arguments.emplace_back("beta", beta);
    torch::lazy::TSOpVector smooth_l1_loss_backward_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(smooth_l1_loss_backward_out.size(), 1);
    return smooth_l1_loss_backward_out;
  }

  int64_t reduction;
  double beta;
};
class Softplus : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::softplus);
  }

  Softplus(const torch::lazy::Value& self, const torch::lazy::Value& beta,
           const torch::lazy::Value& threshold, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Softplus::ClassOpKind(), OpList{self, beta, threshold}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& beta,
                   const torch::lazy::Value& threshold) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == beta && operand(i++) == threshold);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector softplus_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(softplus_out.size(), 1);
    return softplus_out;
  }
};

class SoftplusBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::softplus_backward);
  }

  SoftplusBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self,
                   const torch::lazy::Value& beta, const torch::lazy::Value& threshold,
                   std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SoftplusBackward::ClassOpKind(), OpList{grad_output, self, beta, threshold},
               std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self,
                   const torch::lazy::Value& beta, const torch::lazy::Value& threshold) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == beta &&
            operand(i++) == threshold);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector softplus_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(softplus_backward_out.size(), 1);
    return softplus_backward_out;
  }
};

class Sort : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::sort);
  }

  Sort(const torch::lazy::Value& self, const int64_t& dim, const bool& descending,
       std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Sort::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 2, torch::lazy::MHash(dim, descending)),
        dim(dim),
        descending(descending) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    ss << ", descending=" << descending;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim, const bool& descending) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim && this->descending == descending);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("descending", descending);
    torch::lazy::TSOpVector sort_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(sort_out.size(), 2);
    return sort_out;
  }

  int64_t dim;
  bool descending;
};
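// NOTE: Sort is built with num_outputs = 2 because aten::sort returns
// (values, indices); its Lower accordingly checks for exactly two lowered
// outputs, and consumers address the i-th result as torch::lazy::Value(node,
// i). A hedged usage sketch, assuming torch::lazy::MakeNode and a placeholder
// self value:
//
//   auto sorted = torch::lazy::MakeNode<Sort>(self, /*dim=*/-1,
//                                             /*descending=*/true,
//                                             std::move(shapes));
//   torch::lazy::Value values{sorted, 0};
//   torch::lazy::Value indices{sorted, 1};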
class Sqrt : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::sqrt);
  }

  Sqrt(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Sqrt::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector sqrt_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(sqrt_out.size(), 1);
    return sqrt_out;
  }
};

class SqueezeCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::squeeze_copy);
  }

  SqueezeCopy(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SqueezeCopy::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector squeeze_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(squeeze_copy_out.size(), 1);
    return squeeze_copy_out;
  }
};

class SqueezeCopyDim : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::squeeze_copy);
  }

  SqueezeCopyDim(const torch::lazy::Value& self, const int64_t& dim,
                 std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SqueezeCopyDim::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    torch::lazy::TSOpVector squeeze_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(squeeze_copy_out.size(), 1);
    return squeeze_copy_out;
  }

  int64_t dim;
};
class SqueezeCopyDims : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::squeeze_copy);
  }

  SqueezeCopyDims(const torch::lazy::Value& self, const ::std::vector<int64_t>& dim,
                  std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SqueezeCopyDims::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& dim) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim == dim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    torch::lazy::TSOpVector squeeze_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(squeeze_copy_out.size(), 1);
    return squeeze_copy_out;
  }

  ::std::vector<int64_t> dim;
};

class Stack : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::stack);
  }

  Stack(const torch::lazy::Value& tensors, const int64_t& dim, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Stack::ClassOpKind(), OpList{tensors}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim)),
        dim(dim) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim=" << dim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& tensors, const int64_t& dim) const {
    size_t i = 0;
    return (operand(i++) == tensors && this->dim == dim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    torch::lazy::TSOpVector stack_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(stack_out.size(), 1);
    return stack_out;
  }

  int64_t dim;
};

class Std : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::std);
  }

  Std(const torch::lazy::Value& self, const bool& unbiased, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Std::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(unbiased)),
        unbiased(unbiased) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", unbiased=" << unbiased;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const bool& unbiased) const {
    size_t i = 0;
    return (operand(i++) == self && this->unbiased == unbiased);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("unbiased", unbiased);
    torch::lazy::TSOpVector std_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(std_out.size(), 1);
    return std_out;
  }

  bool unbiased;
};
class StdDim : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::std);
  }

  StdDim(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim,
         const bool& unbiased, const bool& keepdim, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(StdDim::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim, unbiased, keepdim)),
        dim(dim),
        unbiased(unbiased),
        keepdim(keepdim) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (dim.has_value()) {
      ss << ", dim=" << dim.value();
    } else {
      ss << ", dim=null";
    }
    ss << ", unbiased=" << unbiased;
    ss << ", keepdim=" << keepdim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim,
                   const bool& unbiased, const bool& keepdim) const {
    size_t i = 0;
    return (operand(i++) == self &&
            ((!this->dim && !dim) || (this->dim && dim && *(this->dim) == *dim)) &&
            this->unbiased == unbiased && this->keepdim == keepdim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("unbiased", unbiased);
    arguments.emplace_back("keepdim", keepdim);
    torch::lazy::TSOpVector std_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(std_out.size(), 1);
    return std_out;
  }

  ::std::optional<::std::vector<int64_t>> dim;
  bool unbiased;
  bool keepdim;
};

class StdCorrection : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::std);
  }

  StdCorrection(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim,
                const ::std::optional<torch::lazy::Value>& correction, const bool& keepdim,
                std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(StdCorrection::ClassOpKind(), OpList{self, correction.value_or(kNullValue)},
               std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim, keepdim)),
        dim(dim),
        keepdim(keepdim) {
    has_correction = !!correction;
  }

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (dim.has_value()) {
      ss << ", dim=" << dim.value();
    } else {
      ss << ", dim=null";
    }
    ss << ", keepdim=" << keepdim;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim,
                   const ::std::optional<torch::lazy::Value>& correction, const bool& keepdim) const {
    size_t i = 0;
    return (operand(i++) == self && nullable_operand(i++) == correction.value_or(kNullValue) &&
            ((!this->dim && !dim) || (this->dim && dim && *(this->dim) == *dim)) &&
            this->keepdim == keepdim);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(2);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    kwarguments.emplace_back("correction", has_correction ? loctx->GetOutputOp(operand(i++)) : nullptr);
    kwarguments.emplace_back("keepdim", keepdim);
    torch::lazy::TSOpVector std_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(std_out.size(), 1);
    return std_out;
  }

  ::std::optional<::std::vector<int64_t>> dim;
  bool keepdim;
  bool has_correction : 1;
};
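// NOTE: StdDim passes everything positionally, while StdCorrection routes
// correction/keepdim through kwarguments. That mirrors the aten schema, where
// std.correction declares them keyword-only (schema quoted as in recent
// PyTorch; the exact type of correction has changed across releases):
//
//   std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None,
//                  bool keepdim=False) -> Tensor
//
// LowerTSBuiltin forwards kwarguments by name, so the emitted TorchScript
// call matches the schema regardless of argument order.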
class SubTensor : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::sub);
  }

  SubTensor(const torch::lazy::Value& self, const torch::lazy::Value& other,
            const torch::lazy::Value& alpha, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SubTensor::ClassOpKind(), OpList{self, other, alpha}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& other,
                   const torch::lazy::Value& alpha) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == other && operand(i++) == alpha);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("alpha", loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector sub_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(sub_out.size(), 1);
    return sub_out;
  }
};

class Sum : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::sum);
  }

  Sum(const torch::lazy::Value& self, const ::std::optional<at::ScalarType>& dtype,
      std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Sum::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dtype)),
        dtype(dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (dtype.has_value()) {
      ss << ", dtype=" << dtype.value();
    } else {
      ss << ", dtype=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<at::ScalarType>& dtype) const {
    size_t i = 0;
    return (operand(i++) == self &&
            ((!this->dtype && !dtype) || (this->dtype && dtype && *(this->dtype) == *dtype)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    kwarguments.emplace_back("dtype", dtype);
    torch::lazy::TSOpVector sum_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(sum_out.size(), 1);
    return sum_out;
  }

  ::std::optional<at::ScalarType> dtype;
};

class SumDimIntlist : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::sum);
  }

  SumDimIntlist(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim,
                const bool& keepdim, const ::std::optional<at::ScalarType>& dtype,
                std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(SumDimIntlist::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim, keepdim, dtype)),
        dim(dim),
        keepdim(keepdim),
        dtype(dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    if (dim.has_value()) {
      ss << ", dim=" << dim.value();
    } else {
      ss << ", dim=null";
    }
    ss << ", keepdim=" << keepdim;
    if (dtype.has_value()) {
      ss << ", dtype=" << dtype.value();
    } else {
      ss << ", dtype=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::optional<::std::vector<int64_t>>& dim,
                   const bool& keepdim, const ::std::optional<at::ScalarType>& dtype) const {
    size_t i = 0;
    return (operand(i++) == self &&
            ((!this->dim && !dim) || (this->dim && dim && *(this->dim) == *dim)) &&
            this->keepdim == keepdim &&
            ((!this->dtype && !dtype) || (this->dtype && dtype && *(this->dtype) == *dtype)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("keepdim", keepdim);
    kwarguments.emplace_back("dtype", dtype);
    torch::lazy::TSOpVector sum_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(sum_out.size(), 1);
    return sum_out;
  }

  ::std::optional<::std::vector<int64_t>> dim;
  bool keepdim;
  ::std::optional<at::ScalarType> dtype;
};
class TCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::t_copy);
  }

  TCopy(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(TCopy::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector t_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(t_copy_out.size(), 1);
    return t_copy_out;
  }
};

class Tanh : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::tanh);
  }

  Tanh(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Tanh::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector tanh_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(tanh_out.size(), 1);
    return tanh_out;
  }
};

class TanhBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::tanh_backward);
  }

  TanhBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& output,
               std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(TanhBackward::ClassOpKind(), OpList{grad_output, output}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& output) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == output);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector tanh_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(tanh_backward_out.size(), 1);
    return tanh_backward_out;
  }
};
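// NOTE: TanhBackward (like SigmoidBackward above) takes the forward *output*
// rather than the input: since d/dx tanh(x) = 1 - tanh(x)^2, the gradient
// grad_output * (1 - output * output) can be formed entirely from values the
// forward pass already produced, so the lazy graph never needs to keep x
// alive for the backward node.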
class Threshold : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::threshold);
  }

  Threshold(const torch::lazy::Value& self, const torch::lazy::Value& threshold,
            const torch::lazy::Value& value, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Threshold::ClassOpKind(), OpList{self, threshold, value}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const torch::lazy::Value& threshold,
                   const torch::lazy::Value& value) const {
    size_t i = 0;
    return (operand(i++) == self && operand(i++) == threshold && operand(i++) == value);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector threshold_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(threshold_out.size(), 1);
    return threshold_out;
  }
};

class ThresholdBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::threshold_backward);
  }

  ThresholdBackward(const torch::lazy::Value& grad_output, const torch::lazy::Value& self,
                    const torch::lazy::Value& threshold, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ThresholdBackward::ClassOpKind(), OpList{grad_output, self, threshold},
               std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const torch::lazy::Value& self,
                   const torch::lazy::Value& threshold) const {
    size_t i = 0;
    return (operand(i++) == grad_output && operand(i++) == self && operand(i++) == threshold);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector threshold_backward_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(threshold_backward_out.size(), 1);
    return threshold_backward_out;
  }
};

class Topk : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::topk);
  }

  Topk(const torch::lazy::Value& self, const int64_t& k, const int64_t& dim, const bool& largest,
       const bool& sorted, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Topk::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 2, torch::lazy::MHash(k, dim, largest, sorted)),
        k(k),
        dim(dim),
        largest(largest),
        sorted(sorted) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", k=" << k;
    ss << ", dim=" << dim;
    ss << ", largest=" << largest;
    ss << ", sorted=" << sorted;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& k, const int64_t& dim,
                   const bool& largest, const bool& sorted) const {
    size_t i = 0;
    return (operand(i++) == self && this->k == k && this->dim == dim && this->largest == largest &&
            this->sorted == sorted);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("k", k);
    arguments.emplace_back("dim", dim);
    arguments.emplace_back("largest", largest);
    arguments.emplace_back("sorted", sorted);
    torch::lazy::TSOpVector topk_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(topk_out.size(), 2);
    return topk_out;
  }

  int64_t k;
  int64_t dim;
  bool largest;
  bool sorted;
};
class Trace : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::trace);
  }

  Trace(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Trace::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector trace_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(trace_out.size(), 1);
    return trace_out;
  }
};

class TransposeCopyInt : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::transpose_copy);
  }

  TransposeCopyInt(const torch::lazy::Value& self, const int64_t& dim0, const int64_t& dim1,
                   std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(TransposeCopyInt::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dim0, dim1)),
        dim0(dim0),
        dim1(dim1) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dim0=" << dim0;
    ss << ", dim1=" << dim1;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim0, const int64_t& dim1) const {
    size_t i = 0;
    return (operand(i++) == self && this->dim0 == dim0 && this->dim1 == dim1);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dim0", dim0);
    arguments.emplace_back("dim1", dim1);
    torch::lazy::TSOpVector transpose_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(transpose_copy_out.size(), 1);
    return transpose_copy_out;
  }

  int64_t dim0;
  int64_t dim1;
};
class Tril : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::tril);
  }

  Tril(const torch::lazy::Value& self, const int64_t& diagonal, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Tril::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(diagonal)),
        diagonal(diagonal) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", diagonal=" << diagonal;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& diagonal) const {
    size_t i = 0;
    return (operand(i++) == self && this->diagonal == diagonal);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("diagonal", diagonal);
    torch::lazy::TSOpVector tril_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(tril_out.size(), 1);
    return tril_out;
  }

  int64_t diagonal;
};

class Triu : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::triu);
  }

  Triu(const torch::lazy::Value& self, const int64_t& diagonal, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Triu::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(diagonal)),
        diagonal(diagonal) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", diagonal=" << diagonal;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& diagonal) const {
    size_t i = 0;
    return (operand(i++) == self && this->diagonal == diagonal);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("diagonal", diagonal);
    torch::lazy::TSOpVector triu_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(triu_out.size(), 1);
    return triu_out;
  }

  int64_t diagonal;
};

class Trunc : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::trunc);
  }

  Trunc(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Trunc::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector trunc_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(trunc_out.size(), 1);
    return trunc_out;
  }
};

class UnfoldCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::unfold_copy);
  }

  UnfoldCopy(const torch::lazy::Value& self, const int64_t& dimension, const int64_t& size,
             const int64_t& step, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(UnfoldCopy::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dimension, size, step)),
        dimension(dimension),
        size(size),
        step(step) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dimension=" << dimension;
    ss << ", size=" << size;
    ss << ", step=" << step;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const int64_t& dimension, const int64_t& size,
                   const int64_t& step) const {
    size_t i = 0;
    return (operand(i++) == self && this->dimension == dimension && this->size == size &&
            this->step == step);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dimension", dimension);
    arguments.emplace_back("size", size);
    arguments.emplace_back("step", step);
    torch::lazy::TSOpVector unfold_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(unfold_copy_out.size(), 1);
    return unfold_copy_out;
  }

  int64_t dimension;
  int64_t size;
  int64_t step;
};
class Uniform : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::uniform);
  }

  Uniform(const torch::lazy::Value& self, const double& from, const double& to,
          const ::std::optional<at::Generator>& generator, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Uniform::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(from, to, generator)),
        from(from),
        to(to),
        generator(generator) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", from=" << from;
    ss << ", to=" << to;
    if (generator.has_value()) {
      ss << ", generator=" << "torch.Generator()";
    } else {
      ss << ", generator=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const double& from, const double& to,
                   const ::std::optional<at::Generator>& generator) const {
    size_t i = 0;
    return (operand(i++) == self && this->from == from && this->to == to &&
            ((!this->generator && !generator) ||
             (this->generator && generator && *(this->generator) == *generator)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(3);
    kwarguments.reserve(1);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("from", from);
    arguments.emplace_back("to", to);
    kwarguments.emplace_back("generator", generator);
    torch::lazy::TSOpVector uniform_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(uniform_out.size(), 1);
    return uniform_out;
  }

  double from;
  double to;
  ::std::optional<at::Generator> generator;
};
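// NOTE: Uniform folds its optional generator into the node hash via MHash, so
// two uniform_ calls presumably only collapse to one IR node when their
// generator state matches; an explicitly seeded generator changes node
// identity, which keeps the reuse cache from conflating random ops drawn from
// different RNG streams.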
dim=" << dim; return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const int64_t& dim) const { size_t i = 0; return (operand(i++) == self && this->dim == dim); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(2); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("dim", dim); torch::lazy::TSOpVector unsqueeze_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(unsqueeze_copy_out.size(), 1); return unsqueeze_copy_out; } int64_t dim; }; class UpsampleBilinear2d : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::upsample_bilinear2d); } UpsampleBilinear2d(const torch::lazy::Value& self, const ::std::vector& output_size, const bool& align_corners, const ::std::optional& scales_h, const ::std::optional& scales_w, std::vector&& shapes) : TsNode( UpsampleBilinear2d::ClassOpKind(), OpList{self}, std::move(shapes), /* num_outputs */ 1, torch::lazy::MHash(output_size, align_corners, scales_h, scales_w)), output_size(output_size), align_corners(align_corners), scales_h(scales_h), scales_w(scales_w) { } std::string ToString() const override { std::stringstream ss; ss << TsNode::ToString(); ss << ", output_size=" << output_size; ss << ", align_corners=" << align_corners; if (scales_h.has_value()) { ss << ", scales_h=" << scales_h.value(); } else { ss << ", scales_h=null"; } if (scales_w.has_value()) { ss << ", scales_w=" << scales_w.value(); } else { ss << ", scales_w=null"; } return ss.str(); } bool CanBeReused(const torch::lazy::Value& self, const ::std::vector& output_size, const bool& align_corners, const ::std::optional& scales_h, const ::std::optional& scales_w) const { size_t i = 0; return (operand(i++) == self && this->output_size == output_size && this->align_corners == align_corners && ((!this->scales_h&&!scales_h) || (this->scales_h&&scales_h && *(this->scales_h) == *scales_h)) && ((!this->scales_w&&!scales_w) || (this->scales_w&&scales_w && *(this->scales_w) == *scales_w))); } torch::lazy::TSOpVector Lower( std::shared_ptr function, torch::lazy::TSLoweringContext* loctx) const override { std::vector arguments; std::vector kwarguments; arguments.reserve(5); kwarguments.reserve(0); size_t i = 0; arguments.emplace_back(loctx->GetOutputOp(operand(i++))); arguments.emplace_back("output_size", output_size); arguments.emplace_back("align_corners", align_corners); arguments.emplace_back("scales_h", scales_h); arguments.emplace_back("scales_w", scales_w); torch::lazy::TSOpVector upsample_bilinear2d_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments); TORCH_CHECK_EQ(upsample_bilinear2d_out.size(), 1); return upsample_bilinear2d_out; } ::std::vector output_size; bool align_corners; ::std::optional scales_h; ::std::optional scales_w; }; class UpsampleBilinear2dBackward : public TsNode { public: static torch::lazy::OpKind ClassOpKind() { return torch::lazy::OpKind(at::aten::upsample_bilinear2d_backward); } UpsampleBilinear2dBackward(const torch::lazy::Value& grad_output, const ::std::vector& output_size, const ::std::vector& input_size, const bool& align_corners, const ::std::optional& scales_h, const ::std::optional& scales_w, std::vector&& shapes) : TsNode( UpsampleBilinear2dBackward::ClassOpKind(), OpList{grad_output}, std::move(shapes), /* num_outputs */ 1, 
class UpsampleBilinear2dBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::upsample_bilinear2d_backward);
  }

  UpsampleBilinear2dBackward(const torch::lazy::Value& grad_output,
                             const ::std::vector<int64_t>& output_size,
                             const ::std::vector<int64_t>& input_size, const bool& align_corners,
                             const ::std::optional<double>& scales_h,
                             const ::std::optional<double>& scales_w,
                             std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(UpsampleBilinear2dBackward::ClassOpKind(), OpList{grad_output}, std::move(shapes),
               /* num_outputs */ 1,
               torch::lazy::MHash(output_size, input_size, align_corners, scales_h, scales_w)),
        output_size(output_size),
        input_size(input_size),
        align_corners(align_corners),
        scales_h(scales_h),
        scales_w(scales_w) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", output_size=" << output_size;
    ss << ", input_size=" << input_size;
    ss << ", align_corners=" << align_corners;
    if (scales_h.has_value()) {
      ss << ", scales_h=" << scales_h.value();
    } else {
      ss << ", scales_h=null";
    }
    if (scales_w.has_value()) {
      ss << ", scales_w=" << scales_w.value();
    } else {
      ss << ", scales_w=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const ::std::vector<int64_t>& output_size,
                   const ::std::vector<int64_t>& input_size, const bool& align_corners,
                   const ::std::optional<double>& scales_h,
                   const ::std::optional<double>& scales_w) const {
    size_t i = 0;
    return (operand(i++) == grad_output && this->output_size == output_size &&
            this->input_size == input_size && this->align_corners == align_corners &&
            ((!this->scales_h && !scales_h) ||
             (this->scales_h && scales_h && *(this->scales_h) == *scales_h)) &&
            ((!this->scales_w && !scales_w) ||
             (this->scales_w && scales_w && *(this->scales_w) == *scales_w)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(6);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("output_size", output_size);
    arguments.emplace_back("input_size", input_size);
    arguments.emplace_back("align_corners", align_corners);
    arguments.emplace_back("scales_h", scales_h);
    arguments.emplace_back("scales_w", scales_w);
    torch::lazy::TSOpVector upsample_bilinear2d_backward_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(upsample_bilinear2d_backward_out.size(), 1);
    return upsample_bilinear2d_backward_out;
  }

  ::std::vector<int64_t> output_size;
  ::std::vector<int64_t> input_size;
  bool align_corners;
  ::std::optional<double> scales_h;
  ::std::optional<double> scales_w;
};
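// NOTE: The upsample nodes carry output_size/input_size as plain int64_t
// vectors baked into the node and its hash rather than as graph operands. A
// change in image size therefore yields a different node hash, and hence a
// different trace, which generally means a recompile; truly dynamic sizes
// would need to be modeled as Value operands instead.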
class UpsampleNearest2d : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::upsample_nearest2d);
  }

  UpsampleNearest2d(const torch::lazy::Value& self, const ::std::vector<int64_t>& output_size,
                    const ::std::optional<double>& scales_h, const ::std::optional<double>& scales_w,
                    std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(UpsampleNearest2d::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(output_size, scales_h, scales_w)),
        output_size(output_size),
        scales_h(scales_h),
        scales_w(scales_w) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", output_size=" << output_size;
    if (scales_h.has_value()) {
      ss << ", scales_h=" << scales_h.value();
    } else {
      ss << ", scales_h=null";
    }
    if (scales_w.has_value()) {
      ss << ", scales_w=" << scales_w.value();
    } else {
      ss << ", scales_w=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& output_size,
                   const ::std::optional<double>& scales_h,
                   const ::std::optional<double>& scales_w) const {
    size_t i = 0;
    return (operand(i++) == self && this->output_size == output_size &&
            ((!this->scales_h && !scales_h) ||
             (this->scales_h && scales_h && *(this->scales_h) == *scales_h)) &&
            ((!this->scales_w && !scales_w) ||
             (this->scales_w && scales_w && *(this->scales_w) == *scales_w)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(4);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("output_size", output_size);
    arguments.emplace_back("scales_h", scales_h);
    arguments.emplace_back("scales_w", scales_w);
    torch::lazy::TSOpVector upsample_nearest2d_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(upsample_nearest2d_out.size(), 1);
    return upsample_nearest2d_out;
  }

  ::std::vector<int64_t> output_size;
  ::std::optional<double> scales_h;
  ::std::optional<double> scales_w;
};

class UpsampleNearest2dBackward : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::upsample_nearest2d_backward);
  }

  UpsampleNearest2dBackward(const torch::lazy::Value& grad_output,
                            const ::std::vector<int64_t>& output_size,
                            const ::std::vector<int64_t>& input_size,
                            const ::std::optional<double>& scales_h,
                            const ::std::optional<double>& scales_w,
                            std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(UpsampleNearest2dBackward::ClassOpKind(), OpList{grad_output}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(output_size, input_size, scales_h, scales_w)),
        output_size(output_size),
        input_size(input_size),
        scales_h(scales_h),
        scales_w(scales_w) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", output_size=" << output_size;
    ss << ", input_size=" << input_size;
    if (scales_h.has_value()) {
      ss << ", scales_h=" << scales_h.value();
    } else {
      ss << ", scales_h=null";
    }
    if (scales_w.has_value()) {
      ss << ", scales_w=" << scales_w.value();
    } else {
      ss << ", scales_w=null";
    }
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& grad_output, const ::std::vector<int64_t>& output_size,
                   const ::std::vector<int64_t>& input_size, const ::std::optional<double>& scales_h,
                   const ::std::optional<double>& scales_w) const {
    size_t i = 0;
    return (operand(i++) == grad_output && this->output_size == output_size &&
            this->input_size == input_size &&
            ((!this->scales_h && !scales_h) ||
             (this->scales_h && scales_h && *(this->scales_h) == *scales_h)) &&
            ((!this->scales_w && !scales_w) ||
             (this->scales_w && scales_w && *(this->scales_w) == *scales_w)));
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(5);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("output_size", output_size);
    arguments.emplace_back("input_size", input_size);
    arguments.emplace_back("scales_h", scales_h);
    arguments.emplace_back("scales_w", scales_w);
    torch::lazy::TSOpVector upsample_nearest2d_backward_out =
        torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(upsample_nearest2d_backward_out.size(), 1);
    return upsample_nearest2d_backward_out;
  }

  ::std::vector<int64_t> output_size;
  ::std::vector<int64_t> input_size;
  ::std::optional<double> scales_h;
  ::std::optional<double> scales_w;
};
class ViewCopy : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::view_copy);
  }

  ViewCopy(const torch::lazy::Value& self, const ::std::vector<int64_t>& size,
           std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ViewCopy::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(size)),
        size(size) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", size=" << size;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const ::std::vector<int64_t>& size) const {
    size_t i = 0;
    return (operand(i++) == self && this->size == size);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("size", size);
    torch::lazy::TSOpVector view_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(view_copy_out.size(), 1);
    return view_copy_out;
  }

  ::std::vector<int64_t> size;
};

class ViewCopyDtype : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::view_copy);
  }

  ViewCopyDtype(const torch::lazy::Value& self, const at::ScalarType& dtype,
                std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(ViewCopyDtype::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash(dtype)),
        dtype(dtype) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    ss << ", dtype=" << dtype;
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self, const at::ScalarType& dtype) const {
    size_t i = 0;
    return (operand(i++) == self && this->dtype == dtype);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(2);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    arguments.emplace_back("dtype", dtype);
    torch::lazy::TSOpVector view_copy_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(view_copy_out.size(), 1);
    return view_copy_out;
  }

  at::ScalarType dtype;
};

class Zero : public TsNode {
 public:
  static torch::lazy::OpKind ClassOpKind() {
    return torch::lazy::OpKind(at::aten::zero);
  }

  Zero(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
      : TsNode(Zero::ClassOpKind(), OpList{self}, std::move(shapes),
               /* num_outputs */ 1, torch::lazy::MHash()) {}

  std::string ToString() const override {
    std::stringstream ss;
    ss << TsNode::ToString();
    return ss.str();
  }

  bool CanBeReused(const torch::lazy::Value& self) const {
    size_t i = 0;
    return (operand(i++) == self);
  }

  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override {
    std::vector<torch::jit::NamedValue> arguments;
    std::vector<torch::jit::NamedValue> kwarguments;
    arguments.reserve(1);
    kwarguments.reserve(0);
    size_t i = 0;
    arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
    torch::lazy::TSOpVector zero_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
    TORCH_CHECK_EQ(zero_out.size(), 1);
    return zero_out;
  }
};

} // namespace lazy
} // namespace torch