// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <arm_neon.h>
#include <assert.h>

#include "xnnpack/gemm.h"
#include "xnnpack/math.h"


void xnn_qd8_f16_qb4w_gemm_minmax_ukernel_2x8c4__neondotfp16arith(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    xnn_float16* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const struct xnn_f16_qb4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  const int8_t* a0 = a;
  uint16_t* c0 = (uint16_t*) c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 2) {
    a1 = a0;
    c1 = c0;
  }

  size_t bl = params->scalar.blocksize;
  assert(bl <= kc);
  assert(bl != 0);
  assert(bl % 32 == 0);
  // 0xF0 selects the high nibble of each packed weight byte.
  const int8x16_t vmask = vmovq_n_s8(INT8_C(0xF0));

  // Loop over groups of 8 columns.
  do {
    // Initialize the float accumulators with the packed column sums (ksums),
    // scaled by each row's input zero point. The bias proper is added later,
    // after all blocks have been accumulated.
    const float32x4_t vinput_zero_point01 = vcvtq_f32_s32(vld1q_s32(&quantization_params[0].zero_point));
    const float32x4_t vksum0123 = vld1q_f32(w); w = (const float*) w + 4;
    float32x4_t vout0x0123 = vmulq_lane_f32(vksum0123, vget_low_f32(vinput_zero_point01), 0);
    float32x4_t vout1x0123 = vmulq_lane_f32(vksum0123, vget_high_f32(vinput_zero_point01), 0);
    const float32x4_t vksum4567 = vld1q_f32(w); w = (const float*) w + 4;
    float32x4_t vout0x4567 = vmulq_lane_f32(vksum4567, vget_low_f32(vinput_zero_point01), 0);
    float32x4_t vout1x4567 = vmulq_lane_f32(vksum4567, vget_high_f32(vinput_zero_point01), 0);

    for (size_t kb = 0; kb < kc; kb += bl) {
      int32x4_t vacc0x0123 = vdupq_n_s32(0);
      int32x4_t vacc1x0123 = vdupq_n_s32(0);
      int32x4_t vacc0x4567 = vdupq_n_s32(0);
      int32x4_t vacc1x4567 = vdupq_n_s32(0);

      // Inner accumulation loop along the K dimension, within one block of
      // `bl` input channels.
      size_t k = bl;
      // 2x partially unrolled loop to load 8 bytes at a time.
      while (k >= 8 * sizeof(int8_t)) {
        // Load a 2x8 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;

        // Load an 8x8 block of weights and unpack the two nibbles of each byte.
        const int8x16_t vb01234567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb01234567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x0123 = vshlq_n_s8(vb01234567x0123, 4);
        const int8x16_t vb0123x4567 = vshlq_n_s8(vb01234567x4567, 4);
        const int8x16_t vb4567x0123 = vandq_s8(vb01234567x0123, vmask);
        const int8x16_t vb4567x4567 = vandq_s8(vb01234567x4567, vmask);

        // Multiply-accumulate: 2x8 * 8x8 --> 2x8.
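        // Each vdotq_lane_s32 forms four int32 dot products of one 4-byte
        // lane of activations with four 4-byte groups of weights: lane 0
        // covers input channels k+0..k+3 (the low nibbles), lane 1 covers
        // k+4..k+7 (the high nibbles). Both unpacked weight operands carry an
        // extra factor of 16 -- the low nibble is shifted left by 4 and the
        // high nibble stays in the upper bit positions -- which is presumably
        // compensated by pre-scaling the packed per-block filter scales by
        // 1/16 in the weight-packing code (not verified here).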
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);

        k -= 8 * sizeof(int8_t);
      }

      // Handle up to 4 final positions of `k`. Since `bl` is asserted to be
      // a multiple of 32, `k` is always a multiple of 8 here and this tail is
      // not taken in practice; it is retained from the shared c4-neondot
      // template.
      if XNN_UNLIKELY(k != 0) {
        // Load a 2x4 block of activations. These are 8-byte loads of which
        // only 4 bytes are consumed; the over-read is permitted by
        // XNN_OOB_READS.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
        const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;

        // Load a 4x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 2x4 * 4x8 --> 2x8.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      }

      // Load the per-block filter scales, stored as bfloat16 and widened to
      // float32 by shifting into the high half of each 32-bit lane.
      const float32x4_t vfilter_output_scale0123 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(w), 16)); w = (const uint16_t*) w + 4;
      const float32x4_t vfilter_output_scale4567 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(w), 16)); w = (const uint16_t*) w + 4;

      // Scale this block's int32 accumulators and add them into the running
      // float32 outputs.
      float32x4_t vf0x0123 = vcvtq_f32_s32(vacc0x0123);
      float32x4_t vf1x0123 = vcvtq_f32_s32(vacc1x0123);
      vout0x0123 = vfmaq_f32(vout0x0123, vf0x0123, vfilter_output_scale0123);
      vout1x0123 = vfmaq_f32(vout1x0123, vf1x0123, vfilter_output_scale0123);
      float32x4_t vf0x4567 = vcvtq_f32_s32(vacc0x4567);
      float32x4_t vf1x4567 = vcvtq_f32_s32(vacc1x4567);
      vout0x4567 = vfmaq_f32(vout0x4567, vf0x4567, vfilter_output_scale4567);
      vout1x4567 = vfmaq_f32(vout1x4567, vf1x4567, vfilter_output_scale4567);
    }

    // Apply the per-row input scales. They are interleaved with the zero
    // points in quantization_params, hence the lane-1 extraction from the
    // same load used for the zero points above.
    const float32x4_t vinput_scale01 = vreinterpretq_f32_s32(vld1q_s32(&quantization_params[0].zero_point));
    vout0x0123 = vmulq_lane_f32(vout0x0123, vget_low_f32(vinput_scale01), 1);
    vout1x0123 = vmulq_lane_f32(vout1x0123, vget_high_f32(vinput_scale01), 1);
    vout0x4567 = vmulq_lane_f32(vout0x4567, vget_low_f32(vinput_scale01), 1);
    vout1x4567 = vmulq_lane_f32(vout1x4567, vget_high_f32(vinput_scale01), 1);

    const float32x4_t vbias0123 = vld1q_f32(w); w = (const float*) w + 4;
    vout0x0123 = vaddq_f32(vbias0123, vout0x0123);
    vout1x0123 = vaddq_f32(vbias0123, vout1x0123);
    const float32x4_t vbias4567 = vld1q_f32(w); w = (const float*) w + 4;
    vout0x4567 = vaddq_f32(vbias4567, vout0x4567);
    vout1x4567 = vaddq_f32(vbias4567, vout1x4567);

    // Convert to fp16 and clamp to [min, max].
    float16x8_t vfp16out0x01234567 = vcombine_f16(vcvt_f16_f32(vout0x0123), vcvt_f16_f32(vout0x4567));
    float16x8_t vfp16out1x01234567 = vcombine_f16(vcvt_f16_f32(vout1x0123), vcvt_f16_f32(vout1x4567));
    const float16x8_t voutput_min = vreinterpretq_f16_u16(vld1q_dup_u16((const uint16_t*) &params->scalar.min));
    vfp16out0x01234567 = vmaxq_f16(vfp16out0x01234567, voutput_min);
    vfp16out1x01234567 = vmaxq_f16(vfp16out1x01234567, voutput_min);
    const float16x8_t voutput_max = vreinterpretq_f16_u16(vld1q_dup_u16((const uint16_t*) &params->scalar.max));
    vfp16out0x01234567 = vminq_f16(vfp16out0x01234567, voutput_max);
    vfp16out1x01234567 = vminq_f16(vfp16out1x01234567, voutput_max);
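    // Store the 2x8 fp16 tile. The fast path below handles full groups of 8
    // columns; the tail path stores the remaining 4/2/1 halfwords through
    // progressively narrower lane stores.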
    if XNN_LIKELY(nc >= 8) {
      vst1q_u16(c0, vreinterpretq_u16_f16(vfp16out0x01234567));
      vst1q_u16(c1, vreinterpretq_u16_f16(vfp16out1x01234567));

      // Rewind the activation pointers to the start of the rows and advance
      // the output pointers to the next group of 8 columns.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      nc -= 8;
    } else {
      float16x4_t vfp16out0x0123 = vget_low_f16(vfp16out0x01234567);
      float16x4_t vfp16out1x0123 = vget_low_f16(vfp16out1x01234567);
      if (nc & 4) {
        vst1_u16(c0, vreinterpret_u16_f16(vfp16out0x0123)); c0 += 4;
        vst1_u16(c1, vreinterpret_u16_f16(vfp16out1x0123)); c1 += 4;
        vfp16out0x0123 = vget_high_f16(vfp16out0x01234567);
        vfp16out1x0123 = vget_high_f16(vfp16out1x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vfp16out0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vfp16out1x0123), 0); c1 += 2;
        vfp16out0x0123 = vext_f16(vfp16out0x0123, vfp16out0x0123, 2);
        vfp16out1x0123 = vext_f16(vfp16out1x0123, vfp16out1x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_u16(c0, vreinterpret_u16_f16(vfp16out0x0123), 0);
        vst1_lane_u16(c1, vreinterpret_u16_f16(vfp16out1x0123), 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
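// Note: as consumed by the loads above, the packed weight stream `w` for each
// group of 8 output columns is laid out as: 8 float32 column sums (ksums);
// then, per block of `bl` input channels, the nibble-packed int8 weights
// followed by 8 bfloat16 filter scales (widened to float32 via vshll_n_u16);
// and finally 8 float32 biases.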