// File:        intsimdmatrixsse.cpp
// Description: SSE implementation of 8-bit int SIMD matrix multiply.
// Author:      Ray Smith
//
// (C) Copyright 2017, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#if !defined(__SSE4_1__)
#error Implementation only for SSE 4.1 capable architectures
#endif

#include "intsimdmatrix.h"

#include <cstdint>
#include <emmintrin.h>
#include <smmintrin.h>

namespace tesseract {

// Computes and returns the dot product of the n-vectors u and v.
// Uses Intel SSE intrinsics to access the SIMD instruction set.
static int32_t IntDotProductSSE(const int8_t* u, const int8_t* v, int n) {
  int max_offset = n - 8;
  int offset = 0;
  // Accumulate a set of 4 32-bit sums in sum, by loading 8 pairs of 8-bit
  // values, extending them to 16 bit and multiplying to make 32 bit results.
  int32_t result = 0;
  if (offset <= max_offset) {
    offset = 8;
    __m128i packed1 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(u));
    __m128i packed2 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(v));
    __m128i sum = _mm_cvtepi8_epi16(packed1);
    packed2 = _mm_cvtepi8_epi16(packed2);
    // The magic _mm_madd_epi16 is perfect here. It multiplies 8 pairs of 16 bit
    // ints to make 32 bit results, which are then horizontally added in pairs
    // to make 4 32 bit results that still fit in a 128 bit register.
    sum = _mm_madd_epi16(sum, packed2);
    while (offset <= max_offset) {
      packed1 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(u + offset));
      packed2 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(v + offset));
      offset += 8;
      packed1 = _mm_cvtepi8_epi16(packed1);
      packed2 = _mm_cvtepi8_epi16(packed2);
      packed1 = _mm_madd_epi16(packed1, packed2);
      sum = _mm_add_epi32(sum, packed1);
    }
    // Sum the 4 packed 32 bit sums and extract the low result.
    sum = _mm_hadd_epi32(sum, sum);
    sum = _mm_hadd_epi32(sum, sum);
    result = _mm_cvtsi128_si32(sum);
  }
  while (offset < n) {
    result += u[offset] * v[offset];
    ++offset;
  }
  return result;
}
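
// For reference, IntDotProductSSE above is intended to compute the same value
// as the plain scalar loop sketched below (illustration only, not part of the
// original file); the tail loop at the end of IntDotProductSSE already performs
// exactly this computation for the last n % 8 elements.
//
//   static int32_t IntDotProductScalar(const int8_t* u, const int8_t* v, int n) {
//     int32_t total = 0;
//     for (int i = 0; i < n; ++i) total += u[i] * v[i];
//     return total;
//   }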

// Computes part of matrix.vector v = Wu. Computes 1 result.
static void PartialMatrixDotVector1(const int8_t* wi, const double* scales,
                                    const int8_t* u, int num_in,
                                    double* v) {
  double total = IntDotProductSSE(u, wi, num_in);
  // Add in the bias and correct for integer values.
  *v = (total / INT8_MAX + wi[num_in]) * *scales;
}
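
// Worked example of the scaling above (hypothetical numbers, illustration
// only): with num_in = 2, u = {10, 20}, wi = {64, -32, 3} (the last entry is
// the bias) and *scales = 0.5, the integer dot product is
// 10 * 64 + 20 * (-32) = 0, so *v = (0 / 127 + 3) * 0.5 = 1.5.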

static void matrixDotVector(int dim1, int dim2, const int8_t* wi,
                            const double* scales, const int8_t* u, double* v) {
  const int num_out = dim1;
  const int num_in = dim2 - 1;
  int output = 0;

  for (; output < num_out; output++) {
    PartialMatrixDotVector1(wi, scales, u, num_in, v);
    wi += dim2;
    scales++;
    v++;
  }
}
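
// Hypothetical usage sketch (not part of the original file): wi holds a
// dim1 x dim2 row-major int8 weight matrix whose last column is the bias,
// scales holds one scale per output, and v receives dim1 double outputs.
//
//   const int8_t weights[2 * 3] = {64, -32, 3,    // row 0: w00, w01, bias0
//                                  127,  0, -1};  // row 1: w10, w11, bias1
//   const double scales[2] = {0.5, 0.25};
//   const int8_t inputs[2] = {10, 20};
//   double outputs[2];
//   matrixDotVector(/*dim1=*/2, /*dim2=*/3, weights, scales, inputs, outputs);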

const IntSimdMatrix IntSimdMatrix::intSimdMatrixSSE = {
    matrixDotVector,
    // Number of 32 bit outputs held in each register.
    1,
    // Maximum number of registers that we will use to hold outputs.
    1,
    // Number of 8 bit inputs in the inputs register.
    1,
    // Number of inputs in each weight group.
    1
};
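
// Note (interpretation, not part of the original file): with every shape
// parameter set to 1, the generic IntSimdMatrix code keeps the plain
// row-major weight layout, so this SSE path only accelerates the inner
// dot product rather than changing how inputs and weights are grouped.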

}  // namespace tesseract.