// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
#include <assert.h>

#include <arm_neon.h>

#include "xnnpack/common.h"
#include "xnnpack/intrinsics-polyfill.h"
#include "xnnpack/vcvt.h"


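// Converts a batch of IEEE half-precision (FP16) values to signed 8-bit
// integers: each element is scaled, rounded to the nearest integer, shifted
// by the output zero point, and saturated to the int8 range.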
void xnn_f16_qs8_vcvt_ukernel__neonfp16arith_u${BATCH_TILE}(
    size_t batch,
    const xnn_float16* input,
    int8_t* output,
    const struct xnn_f16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

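  // Reinterpret the FP16 input as raw 16-bit words for the NEON loads below.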
  const uint16_t* i = (const uint16_t*) input;

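  // Broadcast the quantization parameters across all vector lanes.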
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16((const uint16_t*) &params->scalar.scale));
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->scalar.output_zero_point);
  $if BATCH_TILE > 8:
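    // Main loop: convert ${BATCH_TILE} elements per iteration.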
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
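      // Load ${BATCH_TILE} FP16 elements as raw bits and reinterpret them as half-precision lanes.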
      $for N in range(0, BATCH_TILE, 8):
        float16x8_t vx${N} = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

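      // Multiply by the scale in half precision.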
      $for N in range(0, BATCH_TILE, 8):
        vx${N} = vmulq_f16(vx${N}, vscale);

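      // Convert to int16, rounding to nearest with ties to even.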
      $for N in range(0, BATCH_TILE, 8):
        int16x8_t vacc${N} = vcvtnq_s16_f16(vx${N});

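      // Add the output zero point with signed saturation.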
      $for N in range(0, BATCH_TILE, 8):
        vacc${N} = vqaddq_s16(vacc${N}, voutput_zero_point);

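      // Narrow pairs of int16 vectors to int8 with saturation.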
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          int8x16_t vy${N} = vcombine_s8(vqmovn_s16(vacc${N}), vqmovn_s16(vacc${N+8}));
        $else:
          int8x8_t vy${N} = vqmovn_s16(vacc${N});

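      // Store the converted elements.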
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          vst1q_s8(output, vy${N}); output += 16;
        $else:
          vst1_s8(output, vy${N}); output += 8;
    }
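  // Remainder loop: convert 8 elements per iteration.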
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    vx = vmulq_f16(vx, vscale);

    int16x8_t vacc = vcvtnq_s16_f16(vx);

    vacc = vqaddq_s16(vacc, voutput_zero_point);

    int8x8_t vy = vqmovn_s16(vacc);
    vst1_s8(output, vy); output += 8;
  }
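  // Tail: convert the final 1 to 7 elements.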
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint16_t));
    assert(batch <= 7 * sizeof(uint16_t));
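    // A full 8-element vector is loaded even though fewer elements remain;
    // the kernel is annotated XNN_OOB_READS, so reading past the end of the
    // input buffer is permitted.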
    float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));

    vx = vmulq_f16(vx, vscale);

    int16x8_t vacc = vcvtnq_s16_f16(vx);
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    int8x8_t vy = vqmovn_s16(vacc);

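    // Store 4, 2, and/or 1 lane(s) based on the bits of the remaining element
    // count, rotating the stored lanes out of the vector after each step.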
    if (batch & (4 * sizeof(uint16_t))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
      vy = vext_s8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
      vy = vext_s8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      vst1_lane_s8(output, vy, 0);
    }
  }
}
