// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c8-neoni8mm.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include "xnnpack/gemm.h"
#include "xnnpack/intrinsics-polyfill.h"
#include "xnnpack/math.h"


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__neoni8mm(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 2) {
    a1 = a0;
    c1 = c0;
  }

  // Loop over groups of 8 columns.
  do {
    // Initialize accumulators with bias. 8 bias values are loaded from the
    // weight matrix, at the start of the group of 8 columns.
    const uint64x2x2_t vbias01x0123 = vld2q_dup_u64(w); w = (const int32_t*) w + 4;
    const uint64x2x2_t vbias01x4567 = vld2q_dup_u64(w); w = (const int32_t*) w + 4;
    int32x4_t vacc01x01 = vreinterpretq_s32_u64(vbias01x0123.val[0]);
    int32x4_t vacc01x23 = vreinterpretq_s32_u64(vbias01x0123.val[1]);
    int32x4_t vacc01x45 = vreinterpretq_s32_u64(vbias01x4567.val[0]);
    int32x4_t vacc01x67 = vreinterpretq_s32_u64(vbias01x4567.val[1]);

    // Inner accumulation loop along the K dimension.
    size_t k = kc;
    // 2x partially unrolled loop, loading 16 bytes of K per row at a time.

    uint64x2x2_t va01x0123456789ABCDEF;
    va01x0123456789ABCDEF.val[0] = vdupq_n_u64(0);
    va01x0123456789ABCDEF.val[1] = vdupq_n_u64(0);

    while (k >= 16 * sizeof(int8_t)) {
      // Load a 2x16 block of activations.
      va01x0123456789ABCDEF = vld2q_lane_u64((const void*) a0, va01x0123456789ABCDEF, 0); a0 += 16;
      va01x0123456789ABCDEF = vld2q_lane_u64((const void*) a1, va01x0123456789ABCDEF, 1); a1 += 16;
      const int8x16_t va01x01234567 = vreinterpretq_s8_u64(va01x0123456789ABCDEF.val[0]);
      const int8x16_t va01x89ABCDEF = vreinterpretq_s8_u64(va01x0123456789ABCDEF.val[1]);

      // Load a 16x8 block of weights.
      const int8x16_t vb01x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb23x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb45x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb67x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb01x89ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb23x89ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb45x89ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb67x89ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      // Multiply-accumulate: 2x16 * 16x8 --> 2x8.
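      // Each vmmlaq_s32 (SMMLA) below multiplies a 2x8 int8 matrix by the
      // transpose of another 2x8 int8 matrix and accumulates the 2x2 int32
      // result. The first operand holds an 8-byte K-slice of rows 0 and 1 of
      // A; the second holds the matching K-slice of two B columns. Thus
      // vacc01x01 accumulates the tile { r0c0, r0c1, r1c0, r1c1 }, and the
      // four tile accumulators together cover output columns 0..7. The
      // second group of four calls folds K positions 8..15 into the same
      // tiles.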
      vacc01x01 = vmmlaq_s32(vacc01x01, va01x01234567, vb01x01234567);
      vacc01x23 = vmmlaq_s32(vacc01x23, va01x01234567, vb23x01234567);
      vacc01x45 = vmmlaq_s32(vacc01x45, va01x01234567, vb45x01234567);
      vacc01x67 = vmmlaq_s32(vacc01x67, va01x01234567, vb67x01234567);
      vacc01x01 = vmmlaq_s32(vacc01x01, va01x89ABCDEF, vb01x89ABCDEF);
      vacc01x23 = vmmlaq_s32(vacc01x23, va01x89ABCDEF, vb23x89ABCDEF);
      vacc01x45 = vmmlaq_s32(vacc01x45, va01x89ABCDEF, vb45x89ABCDEF);
      vacc01x67 = vmmlaq_s32(vacc01x67, va01x89ABCDEF, vb67x89ABCDEF);

      k -= 16 * sizeof(int8_t);
    }

    // Handle up to 8 final positions of `k`.
    if XNN_UNLIKELY(k != 0) {
      // Load a 2x8 block of activations.
      uint64x2_t va01x01234567 = vld1q_dup_u64((const void*) a0); a0 += 8;
      va01x01234567 = vld1q_lane_u64((const void*) a1, va01x01234567, 1); a1 += 8;

      // Load an 8x8 block of weights.
      const int8x16_t vb01x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb23x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb45x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb67x01234567 = vld1q_s8(w); w = (const int8_t*) w + 16;

      // Multiply-accumulate: 2x8 * 8x8 --> 2x8.
      vacc01x01 = vmmlaq_s32(vacc01x01, vreinterpretq_s8_u64(va01x01234567), vb01x01234567);
      vacc01x23 = vmmlaq_s32(vacc01x23, vreinterpretq_s8_u64(va01x01234567), vb23x01234567);
      vacc01x45 = vmmlaq_s32(vacc01x45, vreinterpretq_s8_u64(va01x01234567), vb45x01234567);
      vacc01x67 = vmmlaq_s32(vacc01x67, vreinterpretq_s8_u64(va01x01234567), vb67x01234567);
    }

    // De-interleave the 2x2 SMMLA tiles into one accumulator per row.
    int32x4_t vacc0x0123 = vreinterpretq_s32_u64(vtrn1q_u64(vreinterpretq_u64_s32(vacc01x01), vreinterpretq_u64_s32(vacc01x23)));
    int32x4_t vacc1x0123 = vreinterpretq_s32_u64(vtrn2q_u64(vreinterpretq_u64_s32(vacc01x01), vreinterpretq_u64_s32(vacc01x23)));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u64(vtrn1q_u64(vreinterpretq_u64_s32(vacc01x45), vreinterpretq_u64_s32(vacc01x67)));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u64(vtrn2q_u64(vreinterpretq_u64_s32(vacc01x45), vreinterpretq_u64_s32(vacc01x67)));

    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);

    // Per-channel scales follow the weights in the packed buffer.
    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);

    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
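    // The fp32 requantization above computed round-to-nearest-even
    // (vcvtnq_s32_f32) of acc * scale; the steps below add the output zero
    // point in int16 and narrow with saturation (int32 -> int16 -> int8), so
    // out-of-range values clamp instead of wrapping. On AArch64, the
    // vqmovn_high_* forms fuse the narrow of the upper half with the combine.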
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

    if (nc >= 8) {
      // Main case where the 8 columns fit in the destination.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));

      // Advance to the next 8 columns.
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
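// Note on the packed weight layout consumed above, as implied by this
// kernel's loads (the authoritative description is XNNPack's weight-packing
// code, not this file): each group of 8 output channels stores 8 int32 bias
// values, then the int8 weights for the rounded-up K dimension in 8-deep
// K slices of 8 bytes per column (two columns per 16-byte SMMLA operand,
// columns 0..7 per slice), and finally 8 float per-channel requantization
// scales.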