// Auto-generated file. Do not edit!
//   Template: src/f32-vhswish/avx.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <immintrin.h>

#include "xnnpack/common.h"
#include "xnnpack/vunary.h"


// Computes hswish(x) = x * min(max(x/6 + 1/2, 0), 1) over a batch of floats,
// processing up to 16 elements (two AVX registers) per main-loop iteration.
void xnn_f32_vhswish_ukernel__avx_u16(
    size_t batch,
    const float* input,
    float* output,
    const struct xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Sliding window of 7 ones followed by 7 zeros: a 8-lane load starting at
  // &mask_table[7 - n] yields a mask with exactly the first n lanes enabled.
  static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

  const __m256 vsixth = _mm256_set1_ps(0x1.555556p-3f);  // 1/6, rounded to float
  const __m256 vhalf = _mm256_set1_ps(0.5f);
  const __m256 vone = _mm256_set1_ps(1.0f);
  const __m256 vzero = _mm256_setzero_ps();
  XNN_FORCE_REALIZATION(vsixth);
  XNN_FORCE_REALIZATION(vhalf);
  XNN_FORCE_REALIZATION(vone);
  // XNN_FORCE_REALIZATION(vzero);

  // Main loop: 16 elements per iteration as two 8-lane registers.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m256 vx01234567 = _mm256_loadu_ps(input);
    const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
    input += 16;

    __m256 vacc01234567 = _mm256_mul_ps(vx01234567, vsixth);
    __m256 vacc89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vsixth);

    vacc01234567 = _mm256_add_ps(vacc01234567, vhalf);
    vacc89ABCDEF = _mm256_add_ps(vacc89ABCDEF, vhalf);

    vacc01234567 = _mm256_max_ps(vacc01234567, vzero);
    vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEF, vzero);

    vacc01234567 = _mm256_min_ps(vacc01234567, vone);
    vacc89ABCDEF = _mm256_min_ps(vacc89ABCDEF, vone);

    vacc01234567 = _mm256_mul_ps(vacc01234567, vx01234567);
    vacc89ABCDEF = _mm256_mul_ps(vacc89ABCDEF, vx89ABCDEF);

    _mm256_storeu_ps(output, vacc01234567);
    _mm256_storeu_ps(output + 8, vacc89ABCDEF);
    output += 16;
  }
  // Tail loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    __m256 vacc = _mm256_mul_ps(vx, vsixth);
    vacc = _mm256_add_ps(vacc, vhalf);
    vacc = _mm256_max_ps(vacc, vzero);
    vacc = _mm256_min_ps(vacc, vone);
    vacc = _mm256_mul_ps(vacc, vx);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  // Remainder: 1-7 elements, handled with a masked load and partial stores.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // batch bytes back from &mask_table[7] is &mask_table[7 - batch/4],
    // which enables exactly the batch/4 remaining lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));

    const __m256 vx = _mm256_maskload_ps(input, vmask);
    __m256 vacc = _mm256_mul_ps(vx, vsixth);
    vacc = _mm256_add_ps(vacc, vhalf);
    vacc = _mm256_max_ps(vacc, vzero);
    vacc = _mm256_min_ps(vacc, vone);
    vacc = _mm256_mul_ps(vacc, vx);

    // Store the surviving lanes 4, then 2, then 1 at a time.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
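
#ifdef XNN_VHSWISH_EXAMPLE
// A minimal usage sketch, not part of the generated kernel above. The guard
// macro XNN_VHSWISH_EXAMPLE is an assumption introduced here so normal builds
// are unaffected. It illustrates the calling convention: `batch` is a byte
// count (not an element count), and this kernel never reads `params`, so NULL
// is passed on that assumption. Compile with AVX enabled, e.g. -mavx.
#include <stdio.h>

int main(void) {
  float input[10] = {-4.0f, -3.0f, -2.0f, -1.0f, 0.0f,
                     1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
  float output[10];
  // 10 floats = 40 bytes: exercises the 8-element tail loop once and the
  // masked remainder path for the 2 leftover elements.
  xnn_f32_vhswish_ukernel__avx_u16(10 * sizeof(float), input, output, NULL);
  for (int i = 0; i < 10; i++) {
    printf("hswish(%+.1f) = %+f\n", input[i], output[i]);
  }
  return 0;
}
#endif  // XNN_VHSWISH_EXAMPLE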