tesseract  5.0.0-alpha-619-ge9db
weightmatrix.cpp
// File:        weightmatrix.cpp
// Description: Hides distinction between float/int implementations.
// Author:      Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "weightmatrix.h"

#include <cassert>      // for assert
#include "intsimdmatrix.h"
#include "simddetect.h" // for DotProduct
#include "statistc.h"
#include "tprintf.h"
namespace tesseract {

#if defined(ANDROID)
static inline double log2(double n) {
  return log(n) / log(2.0);
}
#endif  // ANDROID

// Number of iterations after which the correction effectively becomes unity.
const int kAdamCorrectionIterations = 200000;
// Epsilon in Adam to prevent division by zero.
const double kAdamEpsilon = 1e-8;
// Computes matrix.vector v = Wu.
// u is of size W.dim2() - add_bias_fwd and the output v is of size
// W.dim1() - skip_bias_back.
// If add_bias_fwd, u is imagined to have an extra element at the end with
// value 1, to implement the bias weight.
// If skip_bias_back, we are actually performing the backwards product on a
// transposed matrix, so we need to drop the v output corresponding to the
// last element in dim1.
static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
                                           bool add_bias_fwd,
                                           bool skip_bias_back,
                                           const double* u, double* v) {
  int num_results = w.dim1() - skip_bias_back;
  int extent = w.dim2() - add_bias_fwd;
  for (int i = 0; i < num_results; ++i) {
    const double* wi = w[i];
    double total = DotProduct(wi, u, extent);
    if (add_bias_fwd) total += wi[extent];  // The bias value.
    v[i] = total;
  }
}
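// Example (editor's sketch, not part of the original file): the bias trick
// used by MatrixDotVectorInternal in miniature. A weight row stores its bias
// as the last element, and the forward product adds it as if the input had a
// trailing 1.
static double ExampleBiasedRowProduct() {
  const double w_row[] = {0.5, -0.25, 0.1};  // Two weights plus a bias.
  const double u[] = {2.0, 4.0};             // Input without the implicit 1.
  double total = 0.0;
  for (int i = 0; i < 2; ++i) total += w_row[i] * u[i];  // extent == 2.
  total += w_row[2];  // The bias weight, as if u[2] were 1.
  return total;       // 0.5*2.0 - 0.25*4.0 + 0.1 == 0.1.
}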
// Copies the whole input transposed, converted to double, into *this.
void TransposedArray::Transpose(const GENERIC_2D_ARRAY<double>& input) {
  int width = input.dim1();
  int num_features = input.dim2();
  ResizeNoInit(num_features, width);
  for (int t = 0; t < width; ++t) WriteStrided(t, input[t]);
}
// Destructor.
// It is defined here, so the compiler can create a single vtable
// instead of weak vtables in every compilation unit.
TransposedArray::~TransposedArray() = default;
// Sets up the network for training. Initializes weights to random values of
// scale `weight_range` picked according to the random number generator
// `randomizer`.
int WeightMatrix::InitWeightsFloat(int no, int ni, bool use_adam,
                                   float weight_range, TRand* randomizer) {
  int_mode_ = false;
  wf_.Resize(no, ni, 0.0);
  if (randomizer != nullptr) {
    for (int i = 0; i < no; ++i) {
      for (int j = 0; j < ni; ++j) {
        wf_[i][j] = randomizer->SignedRand(weight_range);
      }
    }
  }
  use_adam_ = use_adam;
  InitBackward();
  return ni * no;
}
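// Usage sketch (editor's addition, values hypothetical): setting up a small
// 4-output, 3-input layer for Adam training, with initial weights drawn from
// [-0.1, 0.1]. Passing nullptr for randomizer would leave the weights
// zero-initialized.
static int ExampleInitWeights(WeightMatrix* w, TRand* randomizer) {
  return w->InitWeightsFloat(4, 3, /*use_adam=*/true, 0.1f, randomizer);
  // Returns 4 * 3 = 12, the number of weights allocated.
}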
// Changes the number of outputs to the size of the given code_map, copying
// the old weight matrix entries for each output from code_map[output] where
// non-negative, and uses the mean (over all outputs) of the existing weights
// for all outputs with negative code_map entries. Returns the new number of
// weights.
int WeightMatrix::RemapOutputs(const std::vector<int>& code_map) {
  GENERIC_2D_ARRAY<double> old_wf(wf_);
  int old_no = wf_.dim1();
  int new_no = code_map.size();
  int ni = wf_.dim2();
  std::vector<double> means(ni, 0.0);
  for (int c = 0; c < old_no; ++c) {
    const double* weights = wf_[c];
    for (int i = 0; i < ni; ++i) means[i] += weights[i];
  }
  for (double& mean : means) mean /= old_no;
  wf_.ResizeNoInit(new_no, ni);
  InitBackward();
  for (int dest = 0; dest < new_no; ++dest) {
    int src = code_map[dest];
    const double* src_data = src >= 0 ? old_wf[src] : means.data();
    memcpy(wf_[dest], src_data, ni * sizeof(*src_data));
  }
  return ni * new_no;
}
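// Example (editor's sketch, hypothetical mapping): remapping 3 old outputs
// onto 4 new ones. Each entry of code_map names the old output row to copy;
// a negative entry receives the mean of all old weight rows instead.
static int ExampleRemapOutputs(WeightMatrix* w) {
  std::vector<int> code_map = {2, 0, -1, 1};  // New output 2 gets the mean.
  return w->RemapOutputs(code_map);           // Returns ni * 4.
}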
// Converts a float network to an int network. Each set of input weights that
// corresponds to a single output weight is converted independently:
// Compute the max absolute value of the weight set.
// Scale so the max absolute value becomes INT8_MAX.
// Round to integer.
// Store a multiplicative scale factor (as a double) that will reproduce
// the original value, subject to rounding errors.
void WeightMatrix::ConvertToInt() {
  wi_.ResizeNoInit(wf_.dim1(), wf_.dim2());
  scales_.init_to_size(wi_.dim1(), 0.0);
  int dim2 = wi_.dim2();
  for (int t = 0; t < wi_.dim1(); ++t) {
    double* f_line = wf_[t];
    int8_t* i_line = wi_[t];
    double max_abs = 0.0;
    for (int f = 0; f < dim2; ++f) {
      double abs_val = fabs(f_line[f]);
      if (abs_val > max_abs) max_abs = abs_val;
    }
    double scale = max_abs / INT8_MAX;
    scales_[t] = scale;
    if (scale == 0.0) scale = 1.0;
    for (int f = 0; f < dim2; ++f) {
      i_line[f] = IntCastRounded(f_line[f] / scale);
    }
  }
  wf_.Resize(1, 1, 0.0);
  int_mode_ = true;
  if (IntSimdMatrix::intSimdMatrix != nullptr) {
    IntSimdMatrix::intSimdMatrix->Init(wi_, shaped_w_);
  }
}
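// Worked example (editor's addition): the quantization above in miniature.
// For a row whose largest magnitude is 0.5, scale = 0.5 / 127, so 0.5 maps
// to 127 and 0.2 maps to IntCastRounded(0.2 * 127 / 0.5) = 51. Dequantizing
// gives 51 * scale ~= 0.2008, recovering the original up to rounding error.
static int8_t ExampleQuantizeWeight(double weight, double max_abs) {
  double scale = max_abs / INT8_MAX;  // Same scale formula as ConvertToInt.
  if (scale == 0.0) scale = 1.0;      // Guard for an all-zero row.
  return static_cast<int8_t>(IntCastRounded(weight / scale));
}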
// Allocates any needed memory for running Backward, and zeroes the deltas,
// thus eliminating any existing momentum.
void WeightMatrix::InitBackward() {
  int no = int_mode_ ? wi_.dim1() : wf_.dim1();
  int ni = int_mode_ ? wi_.dim2() : wf_.dim2();
  dw_.Resize(no, ni, 0.0);
  updates_.Resize(no, ni, 0.0);
  wf_t_.Transpose(wf_);
  if (use_adam_) dw_sq_sum_.Resize(no, ni, 0.0);
}
// Flag on mode to indicate that this weightmatrix uses int8_t.
const int kInt8Flag = 1;
// Flag on mode to indicate that this weightmatrix uses Adam.
const int kAdamFlag = 4;
// Flag on mode to indicate that this weightmatrix uses double. Set
// independently of kInt8Flag, as even in int mode the scales can
// be float or double.
const int kDoubleFlag = 128;
// Writes to the given file. Returns false in case of error.
bool WeightMatrix::Serialize(bool training, TFile* fp) const {
  // For backward compatibility, add kDoubleFlag to mode to indicate the
  // doubles format, without errs, so we can detect and read old format
  // weight matrices.
  uint8_t mode =
      (int_mode_ ? kInt8Flag : 0) | (use_adam_ ? kAdamFlag : 0) | kDoubleFlag;
  if (!fp->Serialize(&mode)) return false;
  if (int_mode_) {
    if (!wi_.Serialize(fp)) return false;
    if (!scales_.Serialize(fp)) return false;
  } else {
    if (!wf_.Serialize(fp)) return false;
    if (training && !updates_.Serialize(fp)) return false;
    if (training && use_adam_ && !dw_sq_sum_.Serialize(fp)) return false;
  }
  return true;
}
// Reads from the given file. Returns false in case of error.
bool WeightMatrix::DeSerialize(bool training, TFile* fp) {
  uint8_t mode;
  if (!fp->DeSerialize(&mode)) return false;
  int_mode_ = (mode & kInt8Flag) != 0;
  use_adam_ = (mode & kAdamFlag) != 0;
  if ((mode & kDoubleFlag) == 0) return DeSerializeOld(training, fp);
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) return false;
    if (!scales_.DeSerialize(fp)) return false;
    if (IntSimdMatrix::intSimdMatrix != nullptr) {
      IntSimdMatrix::intSimdMatrix->Init(wi_, shaped_w_);
    }
  } else {
    if (!wf_.DeSerialize(fp)) return false;
    if (training) {
      InitBackward();
      if (!updates_.DeSerialize(fp)) return false;
      if (use_adam_ && !dw_sq_sum_.DeSerialize(fp)) return false;
    }
  }
  return true;
}
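// Example (editor's sketch): decoding the mode byte written by Serialize.
// 133 == kInt8Flag | kAdamFlag | kDoubleFlag describes an int8_t matrix
// trained with Adam in the current double format; a byte without kDoubleFlag
// routes to DeSerializeOld below.
static void ExampleDecodeMode(uint8_t mode, bool* int_mode, bool* use_adam,
                              bool* old_format) {
  *int_mode = (mode & kInt8Flag) != 0;
  *use_adam = (mode & kAdamFlag) != 0;
  *old_format = (mode & kDoubleFlag) == 0;
}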
// As DeSerialize, but reads an old (float) format WeightMatrix for
// backward compatibility.
bool WeightMatrix::DeSerializeOld(bool training, TFile* fp) {
  GENERIC_2D_ARRAY<float> float_array;
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) return false;
    GenericVector<float> old_scales;
    if (!old_scales.DeSerialize(fp)) return false;
    scales_.resize_no_init(old_scales.size());
    for (int i = 0; i < old_scales.size(); ++i) scales_[i] = old_scales[i];
  } else {
    if (!float_array.DeSerialize(fp)) return false;
    FloatToDouble(float_array, &wf_);
  }
  if (training) {
    InitBackward();
    if (!float_array.DeSerialize(fp)) return false;
    FloatToDouble(float_array, &updates_);
    // Errs was only used in int training, which is now dead.
    if (!float_array.DeSerialize(fp)) return false;
  }
  return true;
}
// Computes matrix.vector v = Wu.
// u is of size W.dim2() - 1 and the output v is of size W.dim1().
// u is imagined to have an extra element at the end with value 1, to
// implement the bias, but it doesn't actually have it.
// Asserts that the call matches what we have.
void WeightMatrix::MatrixDotVector(const double* u, double* v) const {
  assert(!int_mode_);
  MatrixDotVectorInternal(wf_, true, false, u, v);
}
void WeightMatrix::MatrixDotVector(const int8_t* u, double* v) const {
  assert(int_mode_);
  if (IntSimdMatrix::intSimdMatrix != nullptr) {
    IntSimdMatrix::intSimdMatrix->matrixDotVectorFunction(
        wi_.dim1(), wi_.dim2(), &shaped_w_[0], &scales_[0], u, v);
  } else {
    IntSimdMatrix::MatrixDotVector(wi_, scales_, u, v);
  }
}
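// Editor's note (assumption, based on the IntSimdMatrix interface): the
// branch above dispatches to a CPU-specific kernel detected at startup;
// shaped_w_ holds the weights as re-packed by IntSimdMatrix::Init into that
// kernel's preferred layout, while the generic fallback reads wi_ directly.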
// MatrixDotVector for peep weights, MultiplyAccumulate adds the
// component-wise products of *this[0] and v to inout.
void WeightMatrix::MultiplyAccumulate(const double* v, double* inout) {
  assert(!int_mode_);
  assert(wf_.dim1() == 1);
  int n = wf_.dim2();
  const double* u = wf_[0];
  for (int i = 0; i < n; ++i) {
    inout[i] += u[i] * v[i];
  }
}
// Computes vector.matrix v = uW.
// u is of size W.dim1() and the output v is of size W.dim2() - 1.
// The last result is discarded, as v is assumed to have an imaginary
// last value of 1, as with MatrixDotVector.
void WeightMatrix::VectorDotMatrix(const double* u, double* v) const {
  assert(!int_mode_);
  MatrixDotVectorInternal(wf_t_, false, true, u, v);
}
// Fills dw_[i][j] with the dot product u[i][] . v[j][], using elements from
// u and v. In terms of the neural network, u is the gradients and v is the
// inputs.
// Note that (matching MatrixDotVector) v[last][] is missing, presumed 1.0.
// Runs parallel if requested. Note that u and v must be transposed.
void WeightMatrix::SumOuterTransposed(const TransposedArray& u,
                                      const TransposedArray& v,
                                      bool in_parallel) {
  assert(!int_mode_);
  int num_outputs = dw_.dim1();
  assert(u.dim1() == num_outputs);
  assert(u.dim2() == v.dim2());
  int num_inputs = dw_.dim2() - 1;
  int num_samples = u.dim2();
  // v is missing the last element in dim1.
  assert(v.dim1() == num_inputs);
#ifdef _OPENMP
#pragma omp parallel for num_threads(4) if (in_parallel)
#endif
  for (int i = 0; i < num_outputs; ++i) {
    double* dwi = dw_[i];
    const double* ui = u[i];
    for (int j = 0; j < num_inputs; ++j) {
      dwi[j] = DotProduct(ui, v[j], num_samples);
    }
    // The last element of v is missing, presumed 1.0f.
    double total = 0.0;
    for (int k = 0; k < num_samples; ++k) total += ui[k];
    dwi[num_inputs] = total;
  }
}
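// Worked example (editor's addition): with a single sample the loop above is
// an outer product. For gradients u = [g0, g1] (two outputs) and inputs
// v = [x0] (one real input; the implicit bias 1 is not stored), dw_ becomes
//   [ g0*x0  g0 ]
//   [ g1*x0  g1 ]
// so each row ends with the raw gradient as the bias derivative.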
// Updates the weights using the given learning rate and momentum.
// num_samples is the quotient to be used in the Adam computation iff
// use_adam_ is true.
void WeightMatrix::Update(double learning_rate, double momentum,
                          double adam_beta, int num_samples) {
  assert(!int_mode_);
  if (use_adam_ && num_samples > 0 && num_samples < kAdamCorrectionIterations) {
    learning_rate *= sqrt(1.0 - pow(adam_beta, num_samples));
    learning_rate /= 1.0 - pow(momentum, num_samples);
  }
  if (use_adam_ && num_samples > 0 && momentum > 0.0) {
    dw_sq_sum_.SumSquares(dw_, adam_beta);
    dw_ *= learning_rate * (1.0 - momentum);
    updates_ *= momentum;
    updates_ += dw_;
    wf_.AdamUpdate(updates_, dw_sq_sum_, learning_rate * kAdamEpsilon);
  } else {
    dw_ *= learning_rate;
    updates_ += dw_;
    if (momentum > 0.0) wf_ += updates_;
    if (momentum >= 0.0) updates_ *= momentum;
  }
  wf_t_.Transpose(wf_);
}
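// Worked example (editor's addition): the Adam bias correction above at
// num_samples = 1 with adam_beta = 0.999 and momentum = 0.9 scales the
// learning rate by sqrt(1 - 0.999) / (1 - 0.9) ~= 0.0316 / 0.1 ~= 0.316;
// the factor tends to 1 as num_samples approaches kAdamCorrectionIterations.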
// Adds the dw_ in other to the dw_ in *this.
void WeightMatrix::AddDeltas(const WeightMatrix& other) {
  assert(dw_.dim1() == other.dw_.dim1());
  assert(dw_.dim2() == other.dw_.dim2());
  dw_ += other.dw_;
}
// Sums the products of weight updates in *this and other, splitting into
// positive (same direction) in *same and negative (different direction) in
// *changed.
void WeightMatrix::CountAlternators(const WeightMatrix& other, double* same,
                                    double* changed) const {
  int num_outputs = updates_.dim1();
  int num_inputs = updates_.dim2();
  assert(num_outputs == other.updates_.dim1());
  assert(num_inputs == other.updates_.dim2());
  for (int i = 0; i < num_outputs; ++i) {
    const double* this_i = updates_[i];
    const double* other_i = other.updates_[i];
    for (int j = 0; j < num_inputs; ++j) {
      double product = this_i[j] * other_i[j];
      if (product < 0.0)
        *changed -= product;
      else
        *same += product;
    }
  }
}
// Helper computes an integer histogram bucket for a weight and adds it
// to the histogram.
const int kHistogramBuckets = 16;
static void HistogramWeight(double weight, STATS* histogram) {
  int bucket = kHistogramBuckets - 1;
  if (weight != 0.0) {
    double logval = -log2(fabs(weight));
    bucket = ClipToRange(IntCastRounded(logval), 0, kHistogramBuckets - 1);
  }
  histogram->add(bucket, 1);
}
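// Example (editor's sketch): the bucket arithmetic above. A weight of 0.25
// gives -log2(0.25) = 2, landing in bucket 2; magnitudes of 1.0 or more clip
// to bucket 0, and exact zeros use the last bucket.
static int ExampleHistogramBucket(double weight) {
  if (weight == 0.0) return kHistogramBuckets - 1;
  return ClipToRange(IntCastRounded(-log2(fabs(weight))), 0,
                     kHistogramBuckets - 1);
}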
void WeightMatrix::Debug2D(const char* msg) {
  STATS histogram(0, kHistogramBuckets);
  if (int_mode_) {
    for (int i = 0; i < wi_.dim1(); ++i) {
      for (int j = 0; j < wi_.dim2(); ++j) {
        HistogramWeight(wi_[i][j] * scales_[i], &histogram);
      }
    }
  } else {
    for (int i = 0; i < wf_.dim1(); ++i) {
      for (int j = 0; j < wf_.dim2(); ++j) {
        HistogramWeight(wf_[i][j], &histogram);
      }
    }
  }
  tprintf("%s\n", msg);
  histogram.print();
}
// Utility function converts an array of float to the corresponding array
// of double.
/* static */
void WeightMatrix::FloatToDouble(const GENERIC_2D_ARRAY<float>& wf,
                                 GENERIC_2D_ARRAY<double>* wd) {
  int dim1 = wf.dim1();
  int dim2 = wf.dim2();
  wd->ResizeNoInit(dim1, dim2);
  for (int i = 0; i < dim1; ++i) {
    const float* wfi = wf[i];
    double* wdi = (*wd)[i];
    for (int j = 0; j < dim2; ++j) wdi[j] = static_cast<double>(wfi[j]);
  }
}
}  // namespace tesseract.