// Auto-generated file. Do not edit!
//   Template: src/qs8-vadd/sse-mul32-ld32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include "xnnpack/common.h"
#include "xnnpack/intrinsics-polyfill.h"
#include "xnnpack/unaligned.h"
#include "xnnpack/vbinary.h"


void xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_u32(
    size_t batch,
    const int8_t* input_a,
    const int8_t* input_b,
    int8_t* output,
    const struct xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const __m128i vbias = _mm_set1_epi32(params->scalar.bias);
  const __m128i va_multiplier = _mm_set1_epi32(params->scalar.a_multiplier);
  const __m128i vb_multiplier = _mm_set1_epi32(params->scalar.b_multiplier);
  const __m128i vshift = _mm_cvtsi32_si128((int) params->scalar.shift);
  const __m128i voutput_zero_point = _mm_set1_epi16(params->scalar.output_zero_point);
  const __m128i voutput_min = _mm_set1_epi8(params->scalar.output_min);
  const __m128i voutput_max = _mm_set1_epi8(params->scalar.output_max);
  XNN_FORCE_REALIZATION(vbias);
  XNN_FORCE_REALIZATION(va_multiplier);
  XNN_FORCE_REALIZATION(vb_multiplier);
  XNN_FORCE_REALIZATION(vshift);
  XNN_FORCE_REALIZATION(voutput_zero_point);
  XNN_FORCE_REALIZATION(voutput_min);
  XNN_FORCE_REALIZATION(voutput_max);
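
  // Per-element arithmetic, as implemented by the intrinsics below:
  //   acc = bias + a * a_multiplier + b * b_multiplier            (32-bit)
  //   out = clamp(output_zero_point + (acc >> shift), output_min, output_max)
  // where the narrowing packs (epi32 -> epi16 -> epi8) saturate along the way.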
  for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
    const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
    const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
    const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
    const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
    const __m128i va89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
    const __m128i vb89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
    const __m128i vaCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
    const __m128i vbCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
    const __m128i vaGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 16)));
    const __m128i vbGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 16)));
    const __m128i vaKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 20)));
    const __m128i vbKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 20)));
    const __m128i vaOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 24)));
    const __m128i vbOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 24)));
    const __m128i vaSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 28)));
    const __m128i vbSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 28)));
    input_a += 32;
    input_b += 32;

    __m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
    __m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
    __m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
    __m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
    __m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_mullo_epi32(vaGHIJ, va_multiplier));
    __m128i vaccKLMN = _mm_add_epi32(vbias, _mm_mullo_epi32(vaKLMN, va_multiplier));
    __m128i vaccOPQR = _mm_add_epi32(vbias, _mm_mullo_epi32(vaOPQR, va_multiplier));
    __m128i vaccSTUV = _mm_add_epi32(vbias, _mm_mullo_epi32(vaSTUV, va_multiplier));

    vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
    vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
    vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
    vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vbGHIJ, vb_multiplier));
    vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vbKLMN, vb_multiplier));
    vaccOPQR = _mm_add_epi32(vaccOPQR, _mm_mullo_epi32(vbOPQR, vb_multiplier));
    vaccSTUV = _mm_add_epi32(vaccSTUV, _mm_mullo_epi32(vbSTUV, vb_multiplier));

    vacc0123 = _mm_sra_epi32(vacc0123, vshift);
    vacc4567 = _mm_sra_epi32(vacc4567, vshift);
    vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
    vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
    vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
    vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
    vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
    vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);

    const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    const __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
    const __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);

    __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
    __m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);

    vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);

    vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);

    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
    output += 32;
  }
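  // Remainder path: handles up to 8 elements per iteration. The loads below
  // always read a full 8 bytes from each input even when fewer elements
  // remain, which is why the kernel is annotated XNN_OOB_READS.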
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128i va0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
      const __m128i vb0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
      const __m128i va4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
      const __m128i vb4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
      input_a += 8;
      input_b += 8;

      __m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
      __m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));

      vacc0123 = _mm_sra_epi32(vacc0123, vshift);
      vacc4567 = _mm_sra_epi32(vacc4567, vshift);

      const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
      vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);

      if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        batch -= 8 * sizeof(int8_t);
      } else {
        if (batch & (4 * sizeof(int8_t))) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (batch & (2 * sizeof(int8_t))) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (batch & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
        }
        batch = 0;
      }
    } while (batch != 0);
  }
}
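
// --- Illustrative usage sketch (not part of the generated kernel) ---
// A minimal, hypothetical driver showing one way to call the ukernel above.
// With a_multiplier = b_multiplier = 1, shift = 0, bias = 0 and
// output_zero_point = 0, the requantization reduces to a saturating int8 add.
// Filling the params struct directly via designated initializers (and the
// exact field types) is an assumption for illustration; XNNPACK populates
// this struct through its own init routines. The EXAMPLE_USAGE_SKETCH guard
// is likewise hypothetical, so the sketch does not affect normal builds.
#ifdef EXAMPLE_USAGE_SKETCH
#include <stdio.h>

int main(void) {
  struct xnn_qs8_add_minmax_params params = {
    .scalar = {
      .bias = 0,
      .a_multiplier = 1,
      .b_multiplier = 1,
      .shift = 0,
      .output_zero_point = 0,
      .output_min = -128,
      .output_max = 127,
    },
  };
  const int8_t a[8] = { 1, 2, 3, 4, -5, -6, 100, -100 };
  const int8_t b[8] = { 1, 1, 1, 1,  1,  1,  50,  -50 };
  int8_t out[8];
  xnn_qs8_vadd_minmax_ukernel__avx_mul32_ld32_u32(
      8 * sizeof(int8_t), a, b, out, &params);
  for (size_t i = 0; i < 8; i++) {
    // The last two sums (150 and -150) saturate to output_max/output_min.
    printf("%d + %d = %d\n", a[i], b[i], out[i]);
  }
  return 0;
}
#endif  // EXAMPLE_USAGE_SKETCH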