// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx8c8-avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include "xnnpack/igemm.h"
#include "xnnpack/intrinsics-polyfill.h"
#include "xnnpack/math.h"
#include "xnnpack/unaligned.h"


void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x8c8__avx256skx(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const int8_t* zero_data,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  float* c0 = c;

  const __m256i vinput_zero_point = _mm256_set1_epi32((int) quantization_params->zero_point);
  const __m256 vinput_scale = _mm256_broadcast_ss(&quantization_params->inv_scale);
  const __m256 vmin = _mm256_set1_ps(params->scalar.min);
  const __m256 vmax = _mm256_set1_ps(params->scalar.max);
  // XNN_FORCE_REALIZATION(vinput_zero_point);
  // XNN_FORCE_REALIZATION(vinput_scale);
  // XNN_FORCE_REALIZATION(vmin);
  // XNN_FORCE_REALIZATION(vmax);

  do {
    // Initialize the accumulators from the packed per-column init terms,
    // scaled by the input zero point.
    const __m128i vinit0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    const __m128i vinit1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    const __m256i vinit01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vinit0), vinit1, 1);
    const __m128i vinit2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    const __m128i vinit3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    const __m256i vinit23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vinit2), vinit3, 1);
    const __m128i vinit4 = _mm_cvtsi32_si128(((const int*) w)[4]);
    const __m128i vinit5 = _mm_cvtsi32_si128(((const int*) w)[5]);
    const __m256i vinit45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vinit4), vinit5, 1);
    const __m128i vinit6 = _mm_cvtsi32_si128(((const int*) w)[6]);
    const __m128i vinit7 = _mm_cvtsi32_si128(((const int*) w)[7]);
    const __m256i vinit67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vinit6), vinit7, 1);
    __m256i vacc0x01 = _mm256_mullo_epi32(vinit01, vinput_zero_point);
    __m256i vacc0x23 = _mm256_mullo_epi32(vinit23, vinput_zero_point);
    __m256i vacc0x45 = _mm256_mullo_epi32(vinit45, vinput_zero_point);
    __m256i vacc0x67 = _mm256_mullo_epi32(vinit67, vinput_zero_point);
    w = (const int32_t*) w + 8;

    size_t p = ks;
    do {
      // Resolve the next indirection entry; entries equal to `zero` are
      // redirected to the padding buffer `zero_data`.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      } else {
        a0 = zero_data;
      }
      a += 1;

      size_t k = 0;
      // Multiply-accumulate 8 K values for 8 output channels per iteration.
      while (k < kc) {
        const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0));
        const __m256i vxa0 = _mm256_cvtepi8_epi16(va0);
        a0 += 8;

        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m256i vxb01 = _mm256_cvtepi8_epi16(vb01);

        vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01));
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m256i vxb23 = _mm256_cvtepi8_epi16(vb23);

        vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23));
        const __m128i vb45 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 32));
        const __m256i vxb45 = _mm256_cvtepi8_epi16(vb45);

        vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45));
        const __m128i vb67 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 48));
        const __m256i vxb67 = _mm256_cvtepi8_epi16(vb67);

        vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67));

        w = (const void*) ((const int8_t*) w + 64);
        k += 8 * sizeof(int8_t);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Reduce the 4 partial accumulators into one vector of 8 column sums.
    const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);

    const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
    __m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask);

    // Dequantize: convert to float, apply the input scale, then the
    // per-channel filter scale and bias.
    __m256 vout0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
    vout0x01234567 = _mm256_mul_ps(vout0x01234567, vinput_scale);

    const __m256 vfilter_output_scale01234567 = _mm256_load_ps((const float*) w);
    const __m256 vbias01234567 = _mm256_load_ps((const float*) w + 8);
    w = (const float*) w + 16;

    vout0x01234567 = _mm256_fmadd_ps(vout0x01234567, vfilter_output_scale01234567, vbias01234567);

    vout0x01234567 = _mm256_max_ps(vout0x01234567, vmin);

    vout0x01234567 = _mm256_min_ps(vout0x01234567, vmax);

    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c0, vout0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Prepare mask for valid 32-bit elements (depends on nc).
      const __mmask8 vmask = _cvtu32_mask8((UINT32_C(1) << nc) - 1);
      _mm256_mask_storeu_ps(c0, vmask, vout0x01234567);
      nc = 0;
    }
  } while (nc != 0);
}
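
#ifdef XNN_IGEMM_USAGE_SKETCH  // hypothetical guard, not part of XNNPACK's build; define it to build this sketch
// Minimal standalone driver, for illustration only. It assumes the packed
// weight layout implied by the loads above: per 8-column group, 8 int32 init
// terms (multiplied by the input zero point on entry), kc/8 blocks of 64 int8
// weights holding 8 consecutive K values per column, then 8 float filter
// scales followed by 8 float biases, with `w` 32-byte aligned for the aligned
// loads. Real callers pack weights with XNNPACK's packing routines; the
// struct below is a hand-rolled stand-in for one 8-column group with kc == 8.
#include <math.h>
#include <stdio.h>

struct packed_weights_1x8 {
  int32_t init[8];       // per-column init terms, scaled by the zero point
  int8_t weights[8][8];  // [column][k]: 8 consecutive K values per column
  float scale[8];        // per-channel filter scales
  float bias[8];         // per-channel biases
};

int main(void) {
  static _Alignas(32) struct packed_weights_1x8 w;
  int8_t a0[8];
  for (int k = 0; k < 8; k++) {
    a0[k] = (int8_t) (k + 1);  // quantized activations
  }
  for (int n = 0; n < 8; n++) {
    int32_t sum = 0;
    for (int k = 0; k < 8; k++) {
      w.weights[n][k] = (int8_t) (n - k);
      sum += w.weights[n][k];
    }
    w.init[n] = -sum;  // so that init*zp + sum(a*w) == sum((a - zp)*w)
    w.scale[n] = 0.5f;
    w.bias[n] = 1.0f;
  }

  const struct xnn_qd8_quantization_params qparams = {
    .zero_point = 3,
    .inv_scale = 0.25f,
  };
  const union xnn_f32_minmax_params params = {
    .scalar = { .min = -INFINITY, .max = INFINITY },
  };
  const int8_t* a[1] = { a0 };  // one indirection entry (ks == 1)
  float c[8];

  xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x8c8__avx256skx(
      /*mr=*/1, /*nc=*/8, /*kc=*/8, /*ks=*/1 * sizeof(void*),
      a, &w, c,
      /*cm_stride=*/8 * sizeof(float), /*cn_stride=*/8 * sizeof(float),
      /*a_offset=*/0, /*zero=*/NULL, /*zero_data=*/NULL,
      &params, &qparams);

  for (int n = 0; n < 8; n++) {
    printf("c[%d] = %f\n", n, (double) c[n]);
  }
  return 0;
}
#endif  // XNN_IGEMM_USAGE_SKETCH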