#pragma once

#ifdef USE_XNNPACK

#include <ATen/core/Tensor.h>
#include <ATen/native/xnnpack/Common.h>
#include <ATen/native/xnnpack/OpContext.h>

namespace at::native::xnnpack {
namespace internal::convolution2d {

// Prepacks weight/bias and captures the conv hyper-parameters into a
// reusable op context, so repeated conv2d_clamp_run calls skip repacking.
// output_min/output_max, when present, fuse an output clamp into the op.
// NOTE(review): element types of the stripped templates reconstructed from
// the op-context convention (Tensor weight, optional<Tensor> bias,
// vector<int64_t> geometry, optional<Scalar> clamp bounds) — confirm against
// the registered schema in the corresponding .cpp.
c10::intrusive_ptr<xnnpack::Conv2dOpContext> createConv2dClampPrePackOpContext(
    Tensor weight,
    std::optional<Tensor> bias,
    std::vector<int64_t> stride,
    std::vector<int64_t> padding,
    std::vector<int64_t> dilation,
    int64_t groups,
    const std::optional<Scalar>& output_min,
    const std::optional<Scalar>& output_max);

// Transposed-convolution variant of the prepack factory; additionally takes
// output_padding, as required by conv_transpose2d geometry.
c10::intrusive_ptr<xnnpack::TransposeConv2dOpContext>
createConv2dTransposeClampPrePackOpContext(
    Tensor weight,
    std::optional<Tensor> bias,
    std::vector<int64_t> stride,
    std::vector<int64_t> padding,
    std::vector<int64_t> output_padding,
    std::vector<int64_t> dilation,
    int64_t groups,
    const std::optional<Scalar>& output_min,
    const std::optional<Scalar>& output_max);

// Executes a convolution previously prepacked into op_context on input.
Tensor conv2d_clamp_run(
    const Tensor& input,
    const c10::intrusive_ptr<xnnpack::Conv2dOpContext>& op_context);

// Extracts the (serialized) weight/bias sizes from a prepacked conv2d
// op context wrapped in an IValue (used for shape analysis without unpacking
// the full tensors).
IValue unpack_prepacked_sizes_conv2d(const IValue& ivalue);

// Executes a transposed convolution previously prepacked into op_context.
Tensor conv2d_transpose_clamp_run(
    const Tensor& input,
    const c10::intrusive_ptr<xnnpack::TransposeConv2dOpContext>& op_context);

// Builds the low-level XNNPACK operator state (ContextConv2D) directly;
// `transposed` selects between conv and conv-transpose. output_min/output_max
// are the resolved clamp bounds (use -inf/+inf for "no clamp").
ContextConv2D create(
    const Tensor& weight,
    const std::optional<Tensor>& bias,
    const IntArrayRef padding,
    const IntArrayRef output_padding,
    const IntArrayRef stride,
    const IntArrayRef dilation,
    const int64_t groups,
    const bool transposed,
    const float output_min,
    const float output_max);

// Runs the XNNPACK operator held by context on input.
Tensor run(ContextConv2D& context, const Tensor& input);

} // namespace internal::convolution2d

// One-shot convolution entry point (no prepacking/clamping) used by the
// XNNPACK dispatch path.
Tensor convolution2d(
    const Tensor& input,
    const Tensor& weight,
    const Tensor& bias,
    const IntArrayRef padding,
    const IntArrayRef stride,
    const IntArrayRef dilation,
    const int64_t groups);

} // namespace at::native::xnnpack

#endif /* USE_XNNPACK */