tesseract  4.0.0-1-g2a2b
lstmtrainer.cpp
1 // File: lstmtrainer.cpp
3 // Description: Top-level line trainer class for LSTM-based networks.
4 // Author: Ray Smith
5 // Created: Fri May 03 09:14:06 PST 2013
6 //
7 // (C) Copyright 2013, Google Inc.
8 // Licensed under the Apache License, Version 2.0 (the "License");
9 // you may not use this file except in compliance with the License.
10 // You may obtain a copy of the License at
11 // http://www.apache.org/licenses/LICENSE-2.0
12 // Unless required by applicable law or agreed to in writing, software
13 // distributed under the License is distributed on an "AS IS" BASIS,
14 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 // See the License for the specific language governing permissions and
16 // limitations under the License.
18 
19 // Include automatically generated configuration file if running autoconf.
20 #ifdef HAVE_CONFIG_H
21 #include "config_auto.h"
22 #endif
23 
24 #include "lstmtrainer.h"
25 #include <string>
26 
27 #include "allheaders.h"
28 #include "boxread.h"
29 #include "ctc.h"
30 #include "imagedata.h"
31 #include "input.h"
32 #include "networkbuilder.h"
33 #include "ratngs.h"
34 #include "recodebeam.h"
35 #ifdef INCLUDE_TENSORFLOW
36 #include "tfnetwork.h"
37 #endif
38 #include "tprintf.h"
39 
40 #include "callcpp.h"
41 
42 namespace tesseract {
43 
44 // Min actual error rate increase to constitute divergence.
45 const double kMinDivergenceRate = 50.0;
46 // Min iterations since last best before acting on a stall.
47 const int kMinStallIterations = 10000;
48 // Fraction of current char error rate that sub_trainer_ has to be ahead
49 // before we declare the sub_trainer_ a success and switch to it.
50 const double kSubTrainerMarginFraction = 3.0 / 128;
51 // Factor to reduce learning rate on divergence.
52 const double kLearningRateDecay = sqrt(0.5);
53 // LR adjustment iterations.
54 const int kNumAdjustmentIterations = 100;
55 // How often to add data to the error_graph_.
56 const int kErrorGraphInterval = 1000;
57 // Number of training images to train between calls to MaintainCheckpoints.
58 const int kNumPagesPerBatch = 100;
59 // Min percent error rate to consider start-up phase over.
60 const int kMinStartedErrorRate = 75;
61 // Error rate at which to transition to stage 1.
62 const double kStageTransitionThreshold = 10.0;
63 // Confidence beyond which the truth is more likely wrong than the recognizer.
64 const double kHighConfidence = 0.9375; // 15/16.
65 // Fraction of weight sign-changing total to constitute a definite improvement.
66 const double kImprovementFraction = 15.0 / 16.0;
67 // Fraction of last written best to make it worth writing another.
68 const double kBestCheckpointFraction = 31.0 / 32.0;
69 // Scale factor for display of target activations of CTC.
70 const int kTargetXScale = 5;
71 const int kTargetYScale = 100;
72 
73 LSTMTrainer::LSTMTrainer()
74  : randomly_rotate_(false),
75  training_data_(0),
76  file_reader_(LoadDataFromFile),
77  file_writer_(SaveDataToFile),
78  checkpoint_reader_(
79  NewPermanentTessCallback(this, &LSTMTrainer::ReadTrainingDump)),
80  checkpoint_writer_(
81  NewPermanentTessCallback(this, &LSTMTrainer::SaveTrainingDump)),
82  sub_trainer_(nullptr) {
83  EmptyConstructor();
84  debug_interval_ = 0;
85 }
86 
87 LSTMTrainer::LSTMTrainer(FileReader file_reader, FileWriter file_writer,
88  CheckPointReader checkpoint_reader,
89  CheckPointWriter checkpoint_writer,
90  const char* model_base, const char* checkpoint_name,
91  int debug_interval, int64_t max_memory)
92  : randomly_rotate_(false),
93  training_data_(max_memory),
94  file_reader_(file_reader),
95  file_writer_(file_writer),
96  checkpoint_reader_(checkpoint_reader),
97  checkpoint_writer_(checkpoint_writer),
98  sub_trainer_(nullptr),
99  mgr_(file_reader) {
100  EmptyConstructor();
101  if (file_reader_ == nullptr) file_reader_ = LoadDataFromFile;
102  if (file_writer_ == nullptr) file_writer_ = SaveDataToFile;
103  if (checkpoint_reader_ == nullptr) {
104  checkpoint_reader_ =
105  NewPermanentTessCallback(this, &LSTMTrainer::ReadTrainingDump);
106  }
107  if (checkpoint_writer_ == nullptr) {
108  checkpoint_writer_ =
109  NewPermanentTessCallback(this, &LSTMTrainer::SaveTrainingDump);
110  }
111  debug_interval_ = debug_interval;
112  model_base_ = model_base;
113  checkpoint_name_ = checkpoint_name;
114 }
115 
116 LSTMTrainer::~LSTMTrainer() {
117  delete align_win_;
118  delete target_win_;
119  delete ctc_win_;
120  delete recon_win_;
121  delete checkpoint_reader_;
122  delete checkpoint_writer_;
123  delete sub_trainer_;
124 }
125 
126 // Tries to deserialize a trainer from the given file and silently returns
127 // false in case of failure.
128 bool LSTMTrainer::TryLoadingCheckpoint(const char* filename,
129  const char* old_traineddata) {
130  GenericVector<char> data;
131  if (!(*file_reader_)(filename, &data)) return false;
132  tprintf("Loaded file %s, unpacking...\n", filename);
133  if (!checkpoint_reader_->Run(data, this)) return false;
134  StaticShape shape = network_->OutputShape(network_->InputShape());
135  if (((old_traineddata == nullptr || *old_traineddata == '\0') &&
136  network_->NumOutputs() == recoder_.code_range()) ||
137  filename == old_traineddata) {
138  return true; // Normal checkpoint load complete.
139  }
140  tprintf("Code range changed from %d to %d!\n", network_->NumOutputs(),
141  recoder_.code_range());
142  if (old_traineddata == nullptr || *old_traineddata == '\0') {
143  tprintf("Must supply the old traineddata for code conversion!\n");
144  return false;
145  }
146  TessdataManager old_mgr;
147  ASSERT_HOST(old_mgr.Init(old_traineddata));
148  TFile fp;
149  if (!old_mgr.GetComponent(TESSDATA_LSTM_UNICHARSET, &fp)) return false;
150  UNICHARSET old_chset;
151  if (!old_chset.load_from_file(&fp, false)) return false;
152  if (!old_mgr.GetComponent(TESSDATA_LSTM_RECODER, &fp)) return false;
153  UnicharCompress old_recoder;
154  if (!old_recoder.DeSerialize(&fp)) return false;
155  std::vector<int> code_map = MapRecoder(old_chset, old_recoder);
156  // Set the null_char_ to the new value.
157  int old_null_char = null_char_;
158  SetNullChar();
159  // Map the softmax(s) in the network.
160  network_->RemapOutputs(old_recoder.code_range(), code_map);
161  tprintf("Previous null char=%d mapped to %d\n", old_null_char, null_char_);
162  return true;
163 }
164 
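// A minimal resume-from-checkpoint sketch (hypothetical paths; the
// checkpoint-aware constructor above and InitCharSet are assumed to be used
// the same way the lstmtraining tool uses them):
//   LSTMTrainer trainer(nullptr, nullptr, nullptr, nullptr, "out/base",
//                       "out/base_checkpoint", 0, 0);
//   trainer.InitCharSet("out/base/base.traineddata");
//   // Pass the old traineddata only when the output code range has changed.
//   if (!trainer.TryLoadingCheckpoint("out/base_checkpoint",
//                                     "eng/eng.traineddata")) {
//     tprintf("Failed to load checkpoint!\n");
//   }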
165 // Initializes the trainer with a network_spec in the network description
166 // language. net_flags control network behavior according to the NetworkFlags
167 // enum. There isn't really much difference between them - only where the
168 // effects are implemented.
169 // For other args see NetworkBuilder::InitNetwork.
170 // Note: Be sure to call InitCharSet before InitNetwork!
171 bool LSTMTrainer::InitNetwork(const STRING& network_spec, int append_index,
172  int net_flags, float weight_range,
173  float learning_rate, float momentum,
174  float adam_beta) {
175  mgr_.SetVersionString(mgr_.VersionString() + ":" + network_spec.string());
176  adam_beta_ = adam_beta;
177  learning_rate_ = learning_rate;
178  momentum_ = momentum;
179  SetNullChar();
180  if (!NetworkBuilder::InitNetwork(recoder_.code_range(), network_spec,
181  append_index, net_flags, weight_range,
182  &randomizer_, &network_)) {
183  return false;
184  }
185  network_str_ += network_spec;
186  tprintf("Built network:%s from request %s\n",
187  network_->spec().string(), network_spec.string());
188  tprintf(
189  "Training parameters:\n Debug interval = %d,"
190  " weights = %g, learning rate = %g, momentum=%g\n",
191  debug_interval_, weight_range, learning_rate_, momentum_);
192  tprintf("null char=%d\n", null_char_);
193  return true;
194 }
195 
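// A sketch of a typical initialization sequence (the VGSL spec string and the
// numeric values below are illustrative only, not defaults taken from this
// file):
//   trainer.InitCharSet("out/base/base.traineddata");  // before InitNetwork!
//   trainer.InitNetwork("[1,36,0,1 Ct3,3,16 Mp3,3 Lfys48 Lfx96 Lrx96 O1c1]",
//                       -1 /*append_index*/, NF_LAYER_SPECIFIC_LR,
//                       0.1f /*weight_range*/, 0.001f /*learning_rate*/,
//                       0.5f /*momentum*/, 0.999f /*adam_beta*/);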
196 // Initializes a trainer from a serialized TFNetworkModel proto.
197 // Returns the global step of TensorFlow graph or 0 if failed.
198 int LSTMTrainer::InitTensorFlowNetwork(const std::string& tf_proto) {
199 #ifdef INCLUDE_TENSORFLOW
200  delete network_;
201  TFNetwork* tf_net = new TFNetwork("TensorFlow");
202  training_iteration_ = tf_net->InitFromProtoStr(tf_proto);
203  if (training_iteration_ == 0) {
204  tprintf("InitFromProtoStr failed!!\n");
205  return 0;
206  }
207  network_ = tf_net;
208  ASSERT_HOST(recoder_.code_range() == tf_net->num_classes());
209  return training_iteration_;
210 #else
211  tprintf("TensorFlow not compiled in! -DINCLUDE_TENSORFLOW\n");
212  return 0;
213 #endif
214 }
215 
216 // Resets all the iteration counters for fine tuning or training a head,
217 // where we want the error reporting to reset.
218 void LSTMTrainer::InitIterations() {
219  sample_iteration_ = 0;
220  training_iteration_ = 0;
221  learning_iteration_ = 0;
222  prev_sample_iteration_ = 0;
223  best_error_rate_ = 100.0;
224  best_iteration_ = 0;
225  worst_error_rate_ = 0.0;
226  worst_iteration_ = 0;
227  stall_iteration_ = kMinStallIterations;
228  improvement_steps_ = kMinStallIterations;
229  perfect_delay_ = 0;
230  last_perfect_training_iteration_ = 0;
231  for (int i = 0; i < ET_COUNT; ++i) {
232  best_error_rates_[i] = 100.0;
233  worst_error_rates_[i] = 0.0;
234  error_buffers_[i].init_to_size(kRollingBufferSize_, 0.0);
235  error_rates_[i] = 100.0;
236  }
237  error_rate_of_last_saved_best_ = kMinStartedErrorRate;
238 }
239 
240 // If the training sample is usable, grid searches for the optimal
241 // dict_ratio/cert_offset, and returns the results in a string of space-
242 // separated triplets of ratio,offset=worderr.
243 Trainability LSTMTrainer::GridSearchDictParams(
244  const ImageData* trainingdata, int iteration, double min_dict_ratio,
245  double dict_ratio_step, double max_dict_ratio, double min_cert_offset,
246  double cert_offset_step, double max_cert_offset, STRING* results) {
247  sample_iteration_ = iteration;
248  NetworkIO fwd_outputs, targets;
249  Trainability result =
250  PrepareForBackward(trainingdata, &fwd_outputs, &targets);
251  if (result == UNENCODABLE || result == HI_PRECISION_ERR || dict_ == nullptr)
252  return result;
253 
254  // Encode/decode the truth to get the normalization.
255  GenericVector<int> truth_labels, ocr_labels, xcoords;
256  ASSERT_HOST(EncodeString(trainingdata->transcription(), &truth_labels));
257  // NO-dict error.
258  RecodeBeamSearch base_search(recoder_, null_char_, SimpleTextOutput(), nullptr);
259  base_search.Decode(fwd_outputs, 1.0, 0.0, RecodeBeamSearch::kMinCertainty,
260  nullptr);
261  base_search.ExtractBestPathAsLabels(&ocr_labels, &xcoords);
262  STRING truth_text = DecodeLabels(truth_labels);
263  STRING ocr_text = DecodeLabels(ocr_labels);
264  double baseline_error = ComputeWordError(&truth_text, &ocr_text);
265  results->add_str_double("0,0=", baseline_error);
266 
267  RecodeBeamSearch search(recoder_, null_char_, SimpleTextOutput(), dict_);
268  for (double r = min_dict_ratio; r < max_dict_ratio; r += dict_ratio_step) {
269  for (double c = min_cert_offset; c < max_cert_offset;
270  c += cert_offset_step) {
271  search.Decode(fwd_outputs, r, c, RecodeBeamSearch::kMinCertainty, nullptr);
272  search.ExtractBestPathAsLabels(&ocr_labels, &xcoords);
273  truth_text = DecodeLabels(truth_labels);
274  ocr_text = DecodeLabels(ocr_labels);
275  // This is destructive on both strings.
276  double word_error = ComputeWordError(&truth_text, &ocr_text);
277  if ((r == min_dict_ratio && c == min_cert_offset) ||
278  !std::isfinite(word_error)) {
279  STRING t = DecodeLabels(truth_labels);
280  STRING o = DecodeLabels(ocr_labels);
281  tprintf("r=%g, c=%g, truth=%s, ocr=%s, wderr=%g, truth[0]=%d\n", r, c,
282  t.string(), o.string(), word_error, truth_labels[0]);
283  }
284  results->add_str_double(" ", r);
285  results->add_str_double(",", c);
286  results->add_str_double("=", word_error);
287  }
288  }
289  return result;
290 }
291 
292 // Provides output on the distribution of weight values.
293 void LSTMTrainer::DebugNetwork() {
294  network_->DebugWeights();
295 }
296 
297 // Loads a set of lstmf files (created by running tesseract with the
298 // lstm.train config) into memory, ready for training. Returns false if
299 // nothing was loaded.
300 bool LSTMTrainer::LoadAllTrainingData(const GenericVector<STRING>& filenames,
301  CachingStrategy cache_strategy,
302  bool randomly_rotate) {
303  randomly_rotate_ = randomly_rotate;
304  training_data_.Clear();
305  return training_data_.LoadDocuments(filenames, cache_strategy, file_reader_);
306 }
307 
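// Hypothetical call (the filename is illustrative; CS_SEQUENTIAL is one of
// the CachingStrategy values from imagedata.h):
//   GenericVector<STRING> filenames;
//   filenames.push_back(STRING("eng.arial.exp0.lstmf"));
//   if (!trainer.LoadAllTrainingData(filenames, CS_SEQUENTIAL, false)) {
//     tprintf("Failed to load training data!\n");
//   }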
308 // Keeps track of best and locally worst char error_rate and launches tests
309 // using tester, when a new min or max is reached.
310 // Writes checkpoints at appropriate times and builds and returns a log message
311 // to indicate progress. Returns false if nothing interesting happened.
312 bool LSTMTrainer::MaintainCheckpoints(TestCallback tester, STRING* log_msg) {
313  PrepareLogMsg(log_msg);
314  double error_rate = CharError();
315  int iteration = learning_iteration();
316  if (iteration >= stall_iteration_ &&
317  error_rate > best_error_rate_ * (1.0 + kSubTrainerMarginFraction) &&
318  best_error_rate_ < kMinStartedErrorRate && !best_trainer_.empty()) {
319  // It hasn't got any better in a long while, and is a margin worse than the
320  // best, so go back to the best model and try a different learning rate.
321  StartSubtrainer(log_msg);
322  }
323  SubTrainerResult sub_trainer_result = STR_NONE;
324  if (sub_trainer_ != nullptr) {
325  sub_trainer_result = UpdateSubtrainer(log_msg);
326  if (sub_trainer_result == STR_REPLACED) {
327  // Reset the inputs, as we have overwritten *this.
328  error_rate = CharError();
329  iteration = learning_iteration();
330  PrepareLogMsg(log_msg);
331  }
332  }
333  bool result = true; // Something interesting happened.
334  GenericVector<char> rec_model_data;
335  if (error_rate < best_error_rate_) {
336  SaveRecognitionDump(&rec_model_data);
337  log_msg->add_str_double(" New best char error = ", error_rate);
338  *log_msg += UpdateErrorGraph(iteration, error_rate, rec_model_data, tester);
339  // If sub_trainer_ is not nullptr, either *this beat it to a new best, or it
340  // just overwrote *this. In either case, we have finished with it.
341  delete sub_trainer_;
342  sub_trainer_ = nullptr;
343  stall_iteration_ = iteration + kMinStallIterations;
344  if (TransitionTrainingStage(kStageTransitionThreshold)) {
345  log_msg->add_str_int(" Transitioned to stage ", CurrentTrainingStage());
346  }
347  checkpoint_writer_->Run(NO_BEST_TRAINER, this, &best_trainer_);
348  if (error_rate < error_rate_of_last_saved_best_ * kBestCheckpointFraction) {
349  STRING best_model_name = DumpFilename();
350  if (!(*file_writer_)(best_trainer_, best_model_name)) {
351  *log_msg += " failed to write best model:";
352  } else {
353  *log_msg += " wrote best model:";
354  error_rate_of_last_saved_best_ = best_error_rate_;
355  }
356  *log_msg += best_model_name;
357  }
358  } else if (error_rate > worst_error_rate_) {
359  SaveRecognitionDump(&rec_model_data);
360  log_msg->add_str_double(" New worst char error = ", error_rate);
361  *log_msg += UpdateErrorGraph(iteration, error_rate, rec_model_data, tester);
362  if (worst_error_rate_ > best_error_rate_ + kMinDivergenceRate &&
363  best_error_rate_ < kMinStartedErrorRate && !best_trainer_.empty()) {
364  // Error rate has ballooned. Go back to the best model.
365  *log_msg += "\nDivergence! ";
366  // Copy best_trainer_ before reading it, as it will get overwritten.
367  GenericVector<char> revert_data(best_trainer_);
368  if (checkpoint_reader_->Run(revert_data, this)) {
369  LogIterations("Reverted to", log_msg);
370  ReduceLearningRates(this, log_msg);
371  } else {
372  LogIterations("Failed to Revert at", log_msg);
373  }
374  // If it fails again, we will wait twice as long before reverting again.
375  stall_iteration_ = iteration + 2 * (iteration - learning_iteration());
376  // Re-save the best trainer with the new learning rates and stall
377  // iteration.
378  checkpoint_writer_->Run(NO_BEST_TRAINER, this, &best_trainer_);
379  }
380  } else {
381  // Something interesting happened only if the sub_trainer_ was trained.
382  result = sub_trainer_result != STR_NONE;
383  }
384  if (checkpoint_writer_ != nullptr && file_writer_ != nullptr &&
385  checkpoint_name_.length() > 0) {
386  // Write a current checkpoint.
387  GenericVector<char> checkpoint;
388  if (!checkpoint_writer_->Run(FULL, this, &checkpoint) ||
389  !(*file_writer_)(checkpoint, checkpoint_name_)) {
390  *log_msg += " failed to write checkpoint.";
391  } else {
392  *log_msg += " wrote checkpoint.";
393  }
394  }
395  *log_msg += "\n";
396  return result;
397 }
398 
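// In a training driver this is typically called once per batch of
// kNumPagesPerBatch lines, along the lines of this sketch (hypothetical;
// max_iterations and the null tester are illustrative):
//   int iteration = trainer.training_iteration();
//   while (iteration < max_iterations) {
//     for (int target = iteration + kNumPagesPerBatch;
//          iteration < target; iteration = trainer.training_iteration()) {
//       trainer.TrainOnLine(&trainer, false);
//     }
//     STRING log_str;
//     trainer.MaintainCheckpoints(nullptr, &log_str);
//     tprintf("%s\n", log_str.string());
//   }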
399 // Builds a string containing a progress message with current error rates.
400 void LSTMTrainer::PrepareLogMsg(STRING* log_msg) const {
401  LogIterations("At", log_msg);
402  log_msg->add_str_double(", Mean rms=", error_rates_[ET_RMS]);
403  log_msg->add_str_double("%, delta=", error_rates_[ET_DELTA]);
404  log_msg->add_str_double("%, char train=", error_rates_[ET_CHAR_ERROR]);
405  log_msg->add_str_double("%, word train=", error_rates_[ET_WORD_RECERR]);
406  log_msg->add_str_double("%, skip ratio=", error_rates_[ET_SKIP_RATIO]);
407  *log_msg += "%, ";
408 }
409 
410 // Appends <intro_str> iteration learning_iteration()/training_iteration()/
411 // sample_iteration() to the log_msg.
412 void LSTMTrainer::LogIterations(const char* intro_str, STRING* log_msg) const {
413  *log_msg += intro_str;
414  log_msg->add_str_int(" iteration ", learning_iteration());
415  log_msg->add_str_int("/", training_iteration());
416  log_msg->add_str_int("/", sample_iteration());
417 }
418 
419 // Returns true and increments the training_stage_ if the error rate has just
420 // passed through the given threshold for the first time.
421 bool LSTMTrainer::TransitionTrainingStage(float error_threshold) {
422  if (best_error_rate_ < error_threshold &&
423  training_stage_ < num_training_stages_ - 1) {
424  ++training_stage_;
425  return true;
426  }
427  return false;
428 }
429 
430 // Writes to the given file. Returns false in case of error.
431 bool LSTMTrainer::Serialize(SerializeAmount serialize_amount,
432  const TessdataManager* mgr, TFile* fp) const {
433  if (!LSTMRecognizer::Serialize(mgr, fp)) return false;
434  if (!fp->Serialize(&learning_iteration_)) return false;
435  if (!fp->Serialize(&prev_sample_iteration_)) return false;
436  if (!fp->Serialize(&perfect_delay_)) return false;
437  if (!fp->Serialize(&last_perfect_training_iteration_)) return false;
438  for (int i = 0; i < ET_COUNT; ++i) {
439  if (!error_buffers_[i].Serialize(fp)) return false;
440  }
441  if (!fp->Serialize(&error_rates_[0], countof(error_rates_))) return false;
442  if (!fp->Serialize(&training_stage_)) return false;
443  uint8_t amount = serialize_amount;
444  if (!fp->Serialize(&amount)) return false;
445  if (serialize_amount == LIGHT) return true; // We are done.
446  if (!fp->Serialize(&best_error_rate_)) return false;
447  if (!fp->Serialize(&best_error_rates_[0], countof(best_error_rates_))) return false;
448  if (!fp->Serialize(&best_iteration_)) return false;
449  if (!fp->Serialize(&worst_error_rate_)) return false;
450  if (!fp->Serialize(&worst_error_rates_[0], countof(worst_error_rates_))) return false;
451  if (!fp->Serialize(&worst_iteration_)) return false;
452  if (!fp->Serialize(&stall_iteration_)) return false;
453  if (!best_model_data_.Serialize(fp)) return false;
454  if (!worst_model_data_.Serialize(fp)) return false;
455  if (serialize_amount != NO_BEST_TRAINER && !best_trainer_.Serialize(fp))
456  return false;
457  GenericVector<char> sub_data;
458  if (sub_trainer_ != nullptr && !SaveTrainingDump(LIGHT, sub_trainer_, &sub_data))
459  return false;
460  if (!sub_data.Serialize(fp)) return false;
461  if (!best_error_history_.Serialize(fp)) return false;
462  if (!best_error_iterations_.Serialize(fp)) return false;
463  return fp->Serialize(&improvement_steps_);
464 }
465 
466 // Reads from the given file. Returns false in case of error.
467 // NOTE: It is assumed that the trainer is never read cross-endian.
468 bool LSTMTrainer::DeSerialize(const TessdataManager* mgr, TFile* fp) {
469  if (!LSTMRecognizer::DeSerialize(mgr, fp)) return false;
470  if (!fp->DeSerialize(&learning_iteration_)) {
471  // Special case. If we successfully decoded the recognizer, but fail here
472  // then it means we were just given a recognizer, so issue a warning and
473  // allow it.
474  tprintf("Warning: LSTMTrainer deserialized an LSTMRecognizer!\n");
475  learning_iteration_ = 0;
476  network_->SetEnableTraining(TS_ENABLED);
477  return true;
478  }
479  if (!fp->DeSerialize(&prev_sample_iteration_)) return false;
480  if (!fp->DeSerialize(&perfect_delay_)) return false;
481  if (!fp->DeSerialize(&last_perfect_training_iteration_)) return false;
482  for (int i = 0; i < ET_COUNT; ++i) {
483  if (!error_buffers_[i].DeSerialize(fp)) return false;
484  }
485  if (!fp->DeSerialize(&error_rates_[0], countof(error_rates_))) return false;
486  if (!fp->DeSerialize(&training_stage_)) return false;
487  uint8_t amount;
488  if (!fp->DeSerialize(&amount)) return false;
489  if (amount == LIGHT) return true; // Don't read the rest.
490  if (!fp->DeSerialize(&best_error_rate_)) return false;
491  if (!fp->DeSerialize(&best_error_rates_[0], countof(best_error_rates_))) return false;
492  if (!fp->DeSerialize(&best_iteration_)) return false;
493  if (!fp->DeSerialize(&worst_error_rate_)) return false;
494  if (!fp->DeSerialize(&worst_error_rates_[0], countof(worst_error_rates_))) return false;
495  if (!fp->DeSerialize(&worst_iteration_)) return false;
496  if (!fp->DeSerialize(&stall_iteration_)) return false;
497  if (!best_model_data_.DeSerialize(fp)) return false;
498  if (!worst_model_data_.DeSerialize(fp)) return false;
499  if (amount != NO_BEST_TRAINER && !best_trainer_.DeSerialize(fp)) return false;
500  GenericVector<char> sub_data;
501  if (!sub_data.DeSerialize(fp)) return false;
502  delete sub_trainer_;
503  if (sub_data.empty()) {
504  sub_trainer_ = nullptr;
505  } else {
506  sub_trainer_ = new LSTMTrainer();
507  if (!ReadTrainingDump(sub_data, sub_trainer_)) return false;
508  }
509  if (!best_error_history_.DeSerialize(fp)) return false;
510  if (!best_error_iterations_.DeSerialize(fp)) return false;
511  return fp->DeSerialize(&improvement_steps_);
512 }
513 
514 // De-serializes the saved best_trainer_ into sub_trainer_, and adjusts the
515 // learning rates (by scaling reduction, or layer specific, according to
516 // NF_LAYER_SPECIFIC_LR).
517 void LSTMTrainer::StartSubtrainer(STRING* log_msg) {
518  delete sub_trainer_;
519  sub_trainer_ = new LSTMTrainer();
520  if (!checkpoint_reader_->Run(best_trainer_, sub_trainer_)) {
521  *log_msg += " Failed to revert to previous best for trial!";
522  delete sub_trainer_;
523  sub_trainer_ = nullptr;
524  } else {
525  log_msg->add_str_int(" Trial sub_trainer_ from iteration ",
526  sub_trainer_->training_iteration());
527  // Reduce learning rate so it doesn't diverge this time.
528  sub_trainer_->ReduceLearningRates(this, log_msg);
529  // If it fails again, we will wait twice as long before reverting again.
530  int stall_offset =
531  learning_iteration() - sub_trainer_->learning_iteration();
532  stall_iteration_ = learning_iteration() + 2 * stall_offset;
533  sub_trainer_->stall_iteration_ = stall_iteration_;
534  // Re-save the best trainer with the new learning rates and stall iteration.
535  checkpoint_writer_->Run(NO_BEST_TRAINER, sub_trainer_, &best_trainer_);
536  }
537 }
538 
539 // While the sub_trainer_ is behind the current training iteration and its
540 // training error is at least kSubTrainerMarginFraction better than the
541 // current training error, trains the sub_trainer_, and returns STR_UPDATED if
542 // it did anything. If it catches up, and has a better error rate than the
543 // current best, as well as a margin over the current error rate, then the
544 // trainer in *this is replaced with sub_trainer_, and STR_REPLACED is
545 // returned. STR_NONE is returned if the subtrainer wasn't good enough to
546 // receive any training iterations.
547 SubTrainerResult LSTMTrainer::UpdateSubtrainer(STRING* log_msg) {
548  double training_error = CharError();
549  double sub_error = sub_trainer_->CharError();
550  double sub_margin = (training_error - sub_error) / sub_error;
551  if (sub_margin >= kSubTrainerMarginFraction) {
552  log_msg->add_str_double(" sub_trainer=", sub_error);
553  log_msg->add_str_double(" margin=", 100.0 * sub_margin);
554  *log_msg += "\n";
555  // Catch up to current iteration.
556  int end_iteration = training_iteration();
557  while (sub_trainer_->training_iteration() < end_iteration &&
558  sub_margin >= kSubTrainerMarginFraction) {
559  int target_iteration =
560  sub_trainer_->training_iteration() + kNumPagesPerBatch;
561  while (sub_trainer_->training_iteration() < target_iteration) {
562  sub_trainer_->TrainOnLine(this, false);
563  }
564  STRING batch_log = "Sub:";
565  sub_trainer_->PrepareLogMsg(&batch_log);
566  batch_log += "\n";
567  tprintf("UpdateSubtrainer:%s", batch_log.string());
568  *log_msg += batch_log;
569  sub_error = sub_trainer_->CharError();
570  sub_margin = (training_error - sub_error) / sub_error;
571  }
572  if (sub_error < best_error_rate_ &&
573  sub_margin >= kSubTrainerMarginFraction) {
574  // The sub_trainer_ has won the race to a new best. Switch to it.
575  GenericVector<char> updated_trainer;
576  SaveTrainingDump(LIGHT, sub_trainer_, &updated_trainer);
577  ReadTrainingDump(updated_trainer, this);
578  log_msg->add_str_int(" Sub trainer wins at iteration ",
579  sub_trainer_->training_iteration());
580  *log_msg += "\n";
581  return STR_REPLACED;
582  }
583  return STR_UPDATED;
584  }
585  return STR_NONE;
586 }
587 
588 // Reduces network learning rates, either for everything, or for layers
589 // independently, according to NF_LAYER_SPECIFIC_LR.
590 void LSTMTrainer::ReduceLearningRates(LSTMTrainer* samples_trainer,
591  STRING* log_msg) {
592  if (network_->TestFlag(NF_LAYER_SPECIFIC_LR)) {
593  int num_reduced = ReduceLayerLearningRates(
594  kLearningRateDecay, kNumAdjustmentIterations, samples_trainer);
595  log_msg->add_str_int("\nReduced learning rate on layers: ", num_reduced);
596  } else {
597  ScaleLearningRate(kLearningRateDecay);
598  log_msg->add_str_double("\nReduced learning rate to :", learning_rate_);
599  }
600  *log_msg += "\n";
601 }
602 
603 // Considers reducing the learning rate independently for each layer down by
604 // factor(<1), or leaving it the same, by double-training the given number of
605 // samples and minimizing the amount of changing of sign of weight updates.
606 // Even if it looks like all weights should remain the same, an adjustment
607 // will be made to guarantee a different result when reverting to an old best.
608 // Returns the number of layer learning rates that were reduced.
609 int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples,
610  LSTMTrainer* samples_trainer) {
611  enum WhichWay {
612  LR_DOWN, // Learning rate will go down by factor.
613  LR_SAME, // Learning rate will stay the same.
614  LR_COUNT // Size of arrays.
615  };
616  GenericVector<STRING> layers = EnumerateLayers();
617  int num_layers = layers.size();
618  GenericVector<int> num_weights;
619  num_weights.init_to_size(num_layers, 0);
620  GenericVector<double> bad_sums[LR_COUNT];
621  GenericVector<double> ok_sums[LR_COUNT];
622  for (int i = 0; i < LR_COUNT; ++i) {
623  bad_sums[i].init_to_size(num_layers, 0.0);
624  ok_sums[i].init_to_size(num_layers, 0.0);
625  }
626  double momentum_factor = 1.0 / (1.0 - momentum_);
627  GenericVector<char> orig_trainer;
628  samples_trainer->SaveTrainingDump(LIGHT, this, &orig_trainer);
629  for (int i = 0; i < num_layers; ++i) {
630  Network* layer = GetLayer(layers[i]);
631  num_weights[i] = layer->IsTraining() ? layer->num_weights() : 0;
632  }
633  int iteration = sample_iteration();
634  for (int s = 0; s < num_samples; ++s) {
635  // Which way will we modify the learning rate?
636  for (int ww = 0; ww < LR_COUNT; ++ww) {
637  // Transfer momentum to learning rate and adjust by the ww factor.
638  float ww_factor = momentum_factor;
639  if (ww == LR_DOWN) ww_factor *= factor;
640  // Make a copy of *this, so we can mess about without damaging anything.
641  LSTMTrainer copy_trainer;
642  samples_trainer->ReadTrainingDump(orig_trainer, &copy_trainer);
643  // Clear the updates, doing nothing else.
644  copy_trainer.network_->Update(0.0, 0.0, 0.0, 0);
645  // Adjust the learning rate in each layer.
646  for (int i = 0; i < num_layers; ++i) {
647  if (num_weights[i] == 0) continue;
648  copy_trainer.ScaleLayerLearningRate(layers[i], ww_factor);
649  }
650  copy_trainer.SetIteration(iteration);
651  // Train on the sample, but keep the update in updates_ instead of
652  // applying to the weights.
653  const ImageData* trainingdata =
654  copy_trainer.TrainOnLine(samples_trainer, true);
655  if (trainingdata == nullptr) continue;
656  // We'll now use this trainer again for each layer.
657  GenericVector<char> updated_trainer;
658  samples_trainer->SaveTrainingDump(LIGHT, &copy_trainer, &updated_trainer);
659  for (int i = 0; i < num_layers; ++i) {
660  if (num_weights[i] == 0) continue;
661  LSTMTrainer layer_trainer;
662  samples_trainer->ReadTrainingDump(updated_trainer, &layer_trainer);
663  Network* layer = layer_trainer.GetLayer(layers[i]);
664  // Update the weights in just the layer, using Adam if enabled.
665  layer->Update(0.0, momentum_, adam_beta_,
666  layer_trainer.training_iteration_ + 1);
667  // Zero the updates matrix again.
668  layer->Update(0.0, 0.0, 0.0, 0);
669  // Train again on the same sample, again holding back the updates.
670  layer_trainer.TrainOnLine(trainingdata, true);
671  // Count the sign changes in the updates in layer vs in copy_trainer.
672  float before_bad = bad_sums[ww][i];
673  float before_ok = ok_sums[ww][i];
674  layer->CountAlternators(*copy_trainer.GetLayer(layers[i]),
675  &ok_sums[ww][i], &bad_sums[ww][i]);
676  float bad_frac =
677  bad_sums[ww][i] + ok_sums[ww][i] - before_bad - before_ok;
678  if (bad_frac > 0.0f)
679  bad_frac = (bad_sums[ww][i] - before_bad) / bad_frac;
680  }
681  }
682  ++iteration;
683  }
684  int num_lowered = 0;
685  for (int i = 0; i < num_layers; ++i) {
686  if (num_weights[i] == 0) continue;
687  Network* layer = GetLayer(layers[i]);
688  float lr = GetLayerLearningRate(layers[i]);
689  double total_down = bad_sums[LR_DOWN][i] + ok_sums[LR_DOWN][i];
690  double total_same = bad_sums[LR_SAME][i] + ok_sums[LR_SAME][i];
691  double frac_down = bad_sums[LR_DOWN][i] / total_down;
692  double frac_same = bad_sums[LR_SAME][i] / total_same;
693  tprintf("Layer %d=%s: lr %g->%g%%, lr %g->%g%%", i, layer->name().string(),
694  lr * factor, 100.0 * frac_down, lr, 100.0 * frac_same);
695  if (frac_down < frac_same * kImprovementFraction) {
696  tprintf(" REDUCED\n");
697  ScaleLayerLearningRate(layers[i], factor);
698  ++num_lowered;
699  } else {
700  tprintf(" SAME\n");
701  }
702  }
703  if (num_lowered == 0) {
704  // Just lower everything to make sure.
705  for (int i = 0; i < num_layers; ++i) {
706  if (num_weights[i] > 0) {
707  ScaleLayerLearningRate(layers[i], factor);
708  ++num_lowered;
709  }
710  }
711  }
712  return num_lowered;
713 }
714 
715 // Converts the string to integer class labels, with appropriate null_char_s
716 // in between if not in SimpleTextOutput mode. Returns false on failure.
717 /* static */
718 bool LSTMTrainer::EncodeString(const STRING& str, const UNICHARSET& unicharset,
719  const UnicharCompress* recoder, bool simple_text,
720  int null_char, GenericVector<int>* labels) {
721  if (str.string() == nullptr || str.length() <= 0) {
722  tprintf("Empty truth string!\n");
723  return false;
724  }
725  int err_index;
726  GenericVector<int> internal_labels;
727  labels->truncate(0);
728  if (!simple_text) labels->push_back(null_char);
729  std::string cleaned = unicharset.CleanupString(str.string());
730  if (unicharset.encode_string(cleaned.c_str(), true, &internal_labels, nullptr,
731  &err_index)) {
732  bool success = true;
733  for (int i = 0; i < internal_labels.size(); ++i) {
734  if (recoder != nullptr) {
735  // Re-encode labels via recoder.
736  RecodedCharID code;
737  int len = recoder->EncodeUnichar(internal_labels[i], &code);
738  if (len > 0) {
739  for (int j = 0; j < len; ++j) {
740  labels->push_back(code(j));
741  if (!simple_text) labels->push_back(null_char);
742  }
743  } else {
744  success = false;
745  err_index = 0;
746  break;
747  }
748  } else {
749  labels->push_back(internal_labels[i]);
750  if (!simple_text) labels->push_back(null_char);
751  }
752  }
753  if (success) return true;
754  }
755  tprintf("Encoding of string failed! Failure bytes:");
756  while (err_index < cleaned.size()) {
757  tprintf(" %x", cleaned[err_index++]);
758  }
759  tprintf("\n");
760  return false;
761 }
762 
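// Worked example (hypothetical label ids): with simple_text false and
// null_char == 0, a transcription whose unichars encode to the codes {5, 6}
// yields the label sequence {0, 5, 0, 6, 0} - a leading null plus a null
// after every emitted code, which is the layout the CTC-style targets expect.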
763 // Performs forward-backward on the given trainingdata.
764 // Returns a Trainability enum to indicate the suitability of the sample.
765 Trainability LSTMTrainer::TrainOnLine(const ImageData* trainingdata,
766  bool batch) {
767  NetworkIO fwd_outputs, targets;
768  Trainability trainable =
769  PrepareForBackward(trainingdata, &fwd_outputs, &targets);
770  ++sample_iteration_;
771  if (trainable == UNENCODABLE || trainable == NOT_BOXED) {
772  return trainable; // Sample was unusable.
773  }
774  bool debug = debug_interval_ > 0 &&
775  training_iteration() % debug_interval_ == 0;
776  // Run backprop on the output.
777  NetworkIO bp_deltas;
778  if (network_->IsTraining() &&
779  (trainable != PERFECT ||
780  training_iteration() >
781  last_perfect_training_iteration_ + perfect_delay_)) {
782  network_->Backward(debug, targets, &scratch_space_, &bp_deltas);
783  network_->Update(learning_rate_, batch ? -1.0f : momentum_, adam_beta_,
784  training_iteration_ + 1);
785  }
786 #ifndef GRAPHICS_DISABLED
787  if (debug_interval_ == 1 && debug_win_ != nullptr) {
788  delete debug_win_->AwaitEvent(SVET_CLICK);
789  }
790 #endif // GRAPHICS_DISABLED
791  // Roll the memory of past means.
792  RollErrorBuffers();
793  return trainable;
794 }
795 
796 // Prepares the ground truth, runs forward, and prepares the targets.
797 // Returns a Trainability enum to indicate the suitability of the sample.
798 Trainability LSTMTrainer::PrepareForBackward(const ImageData* trainingdata,
799  NetworkIO* fwd_outputs,
800  NetworkIO* targets) {
801  if (trainingdata == nullptr) {
802  tprintf("Null trainingdata.\n");
803  return UNENCODABLE;
804  }
805  // Ensure repeatability of random elements even across checkpoints.
806  bool debug = debug_interval_ > 0 &&
807  training_iteration() % debug_interval_ == 0;
808  GenericVector<int> truth_labels;
809  if (!EncodeString(trainingdata->transcription(), &truth_labels)) {
810  tprintf("Can't encode transcription: '%s' in language '%s'\n",
811  trainingdata->transcription().string(),
812  trainingdata->language().string());
813  return UNENCODABLE;
814  }
815  bool upside_down = false;
816  if (randomly_rotate_) {
817  // This ensures consistent training results.
818  SetRandomSeed();
819  upside_down = randomizer_.SignedRand(1.0) > 0.0;
820  if (upside_down) {
821  // Modify the truth labels to match the rotation:
822  // Apart from space and null, increment the label. This changes the
823  // script-id to the same script-id but upside-down.
824  // The labels need to be reversed in order, as the first is now the last.
825  for (int c = 0; c < truth_labels.size(); ++c) {
826  if (truth_labels[c] != UNICHAR_SPACE && truth_labels[c] != null_char_)
827  ++truth_labels[c];
828  }
829  truth_labels.reverse();
830  }
831  }
832  int w = 0;
833  while (w < truth_labels.size() &&
834  (truth_labels[w] == UNICHAR_SPACE || truth_labels[w] == null_char_))
835  ++w;
836  if (w == truth_labels.size()) {
837  tprintf("Blank transcription: %s\n",
838  trainingdata->transcription().string());
839  return UNENCODABLE;
840  }
841  float image_scale;
842  NetworkIO inputs;
843  bool invert = trainingdata->boxes().empty();
844  if (!RecognizeLine(*trainingdata, invert, debug, invert, upside_down,
845  &image_scale, &inputs, fwd_outputs)) {
846  tprintf("Image not trainable\n");
847  return UNENCODABLE;
848  }
849  targets->Resize(*fwd_outputs, network_->NumOutputs());
850  LossType loss_type = OutputLossType();
851  if (loss_type == LT_SOFTMAX) {
852  if (!ComputeTextTargets(*fwd_outputs, truth_labels, targets)) {
853  tprintf("Compute simple targets failed!\n");
854  return UNENCODABLE;
855  }
856  } else if (loss_type == LT_CTC) {
857  if (!ComputeCTCTargets(truth_labels, fwd_outputs, targets)) {
858  tprintf("Compute CTC targets failed!\n");
859  return UNENCODABLE;
860  }
861  } else {
862  tprintf("Logistic outputs not implemented yet!\n");
863  return UNENCODABLE;
864  }
865  GenericVector<int> ocr_labels;
866  GenericVector<int> xcoords;
867  LabelsFromOutputs(*fwd_outputs, &ocr_labels, &xcoords);
868  // CTC does not produce correct target labels to begin with.
869  if (loss_type != LT_CTC) {
870  LabelsFromOutputs(*targets, &truth_labels, &xcoords);
871  }
872  if (!DebugLSTMTraining(inputs, *trainingdata, *fwd_outputs, truth_labels,
873  *targets)) {
874  tprintf("Input width was %d\n", inputs.Width());
875  return UNENCODABLE;
876  }
877  STRING ocr_text = DecodeLabels(ocr_labels);
878  STRING truth_text = DecodeLabels(truth_labels);
879  targets->SubtractAllFromFloat(*fwd_outputs);
880  if (debug_interval_ != 0) {
881  tprintf("Iteration %d: BEST OCR TEXT : %s\n", training_iteration(),
882  ocr_text.string());
883  }
884  double char_error = ComputeCharError(truth_labels, ocr_labels);
885  double word_error = ComputeWordError(&truth_text, &ocr_text);
886  double delta_error = ComputeErrorRates(*targets, char_error, word_error);
887  if (debug_interval_ != 0) {
888  tprintf("File %s page %d %s:\n", trainingdata->imagefilename().string(),
889  trainingdata->page_number(), delta_error == 0.0 ? "(Perfect)" : "");
890  }
891  if (delta_error == 0.0) return PERFECT;
892  if (targets->AnySuspiciousTruth(kHighConfidence)) return HI_PRECISION_ERR;
893  return TRAINABLE;
894 }
895 
896 // Writes the trainer to memory, so that the current training state can be
897 // restored. *this must always be the master trainer that retains the only
898 // copy of the training data and language model. trainer is the model that is
899 // actually serialized.
900 bool LSTMTrainer::SaveTrainingDump(SerializeAmount serialize_amount,
901  const LSTMTrainer* trainer,
902  GenericVector<char>* data) const {
903  TFile fp;
904  fp.OpenWrite(data);
905  return trainer->Serialize(serialize_amount, &mgr_, &fp);
906 }
907 
908 // Restores the model to *this.
909 bool LSTMTrainer::ReadLocalTrainingDump(const TessdataManager* mgr,
910  const char* data, int size) {
911  if (size == 0) {
912  tprintf("Warning: data size is 0 in LSTMTrainer::ReadLocalTrainingDump\n");
913  return false;
914  }
915  TFile fp;
916  fp.Open(data, size);
917  return DeSerialize(mgr, &fp);
918 }
919 
920 // Writes the full recognition traineddata to the given filename.
921 bool LSTMTrainer::SaveTraineddata(const STRING& filename) {
922  GenericVector<char> recognizer_data;
923  SaveRecognitionDump(&recognizer_data);
924  mgr_.OverwriteEntry(TESSDATA_LSTM, &recognizer_data[0],
925  recognizer_data.size());
926  return mgr_.SaveFile(filename, file_writer_);
927 }
928 
929 // Writes the recognizer to memory, so that it can be used for testing later.
930 void LSTMTrainer::SaveRecognitionDump(GenericVector<char>* data) const {
931  TFile fp;
932  fp.OpenWrite(data);
933  network_->SetEnableTraining(TS_TEMP_DISABLE);
934  ASSERT_HOST(LSTMRecognizer::Serialize(&mgr_, &fp));
935  network_->SetEnableTraining(TS_RE_ENABLE);
936 }
937 
938 // Returns a suitable filename for a training dump, based on the model_base_,
939 // the iteration and the error rates.
940 STRING LSTMTrainer::DumpFilename() const {
941  STRING filename;
942  filename.add_str_double(model_base_.string(), best_error_rate_);
943  filename.add_str_int("_", best_iteration_);
944  filename += ".checkpoint";
945  return filename;
946 }
947 
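// The resulting name has the form
// <model_base_><best_error_rate_>_<best_iteration_>.checkpoint, e.g. something
// like "out/eng0.95_8400.checkpoint" (values illustrative; the exact rendering
// of the error rate is whatever STRING::add_str_double produces).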
948 // Fills the whole error buffer of the given type with the given value.
949 void LSTMTrainer::FillErrorBuffer(double new_error, ErrorTypes type) {
950  for (int i = 0; i < kRollingBufferSize_; ++i)
951  error_buffers_[type][i] = new_error;
952  error_rates_[type] = 100.0 * new_error;
953 }
954 
955 // Helper generates a map from each current recoder_ code (ie softmax index)
956 // to the corresponding old_recoder code, or -1 if there isn't one.
957 std::vector<int> LSTMTrainer::MapRecoder(
958  const UNICHARSET& old_chset, const UnicharCompress& old_recoder) const {
959  int num_new_codes = recoder_.code_range();
960  int num_new_unichars = GetUnicharset().size();
961  std::vector<int> code_map(num_new_codes, -1);
962  for (int c = 0; c < num_new_codes; ++c) {
963  int old_code = -1;
964  // Find all new unichar_ids that recode to something that includes c.
965  // The <= is to include the null char, which may be beyond the unicharset.
966  for (int uid = 0; uid <= num_new_unichars; ++uid) {
967  RecodedCharID codes;
968  int length = recoder_.EncodeUnichar(uid, &codes);
969  int code_index = 0;
970  while (code_index < length && codes(code_index) != c) ++code_index;
971  if (code_index == length) continue;
972  // The old unicharset must have the same unichar.
973  int old_uid =
974  uid < num_new_unichars
975  ? old_chset.unichar_to_id(GetUnicharset().id_to_unichar(uid))
976  : old_chset.size() - 1;
977  if (old_uid == INVALID_UNICHAR_ID) continue;
978  // The encoding of old_uid at the same code_index is the old code.
979  RecodedCharID old_codes;
980  if (code_index < old_recoder.EncodeUnichar(old_uid, &old_codes)) {
981  old_code = old_codes(code_index);
982  break;
983  }
984  }
985  code_map[c] = old_code;
986  }
987  return code_map;
988 }
989 
990 // Private version of InitCharSet above finishes the job after initializing
991 // the mgr_ data member.
992 void LSTMTrainer::InitCharSet() {
993  EmptyConstructor();
994  training_flags_ = TF_COMPRESS_UNICHARSET;
995  // Initialize the unicharset and recoder.
996  if (!LoadCharsets(&mgr_)) {
997  ASSERT_HOST(
998  "Must provide a traineddata containing lstm_unicharset and"
999  " lstm_recoder!\n" != nullptr);
1000  }
1001  SetNullChar();
1002 }
1003 
1004 // Helper computes and sets the null_char_.
1005 void LSTMTrainer::SetNullChar() {
1006  null_char_ = GetUnicharset().has_special_codes() ? UNICHAR_BROKEN
1007  : GetUnicharset().size();
1008  RecodedCharID code;
1009  recoder_.EncodeUnichar(null_char_, &code);
1010  null_char_ = code(0);
1011 }
1012 
1013 // Factored sub-constructor sets up reasonable default values.
1014 void LSTMTrainer::EmptyConstructor() {
1015  align_win_ = nullptr;
1016  target_win_ = nullptr;
1017  ctc_win_ = nullptr;
1018  recon_win_ = nullptr;
1019  checkpoint_iteration_ = 0;
1020  training_stage_ = 0;
1021  num_training_stages_ = 2;
1022  InitIterations();
1023 }
1024 
1025 // Outputs the string and periodically displays the given network inputs
1026 // as an image in the given window, and the corresponding labels at the
1027 // corresponding x_starts.
1028 // Returns false if the truth string is empty.
1029 bool LSTMTrainer::DebugLSTMTraining(const NetworkIO& inputs,
1030  const ImageData& trainingdata,
1031  const NetworkIO& fwd_outputs,
1032  const GenericVector<int>& truth_labels,
1033  const NetworkIO& outputs) {
1034  const STRING& truth_text = DecodeLabels(truth_labels);
1035  if (truth_text.string() == nullptr || truth_text.length() <= 0) {
1036  tprintf("Empty truth string at decode time!\n");
1037  return false;
1038  }
1039  if (debug_interval_ != 0) {
1040  // Get class labels, xcoords and string.
1041  GenericVector<int> labels;
1042  GenericVector<int> xcoords;
1043  LabelsFromOutputs(outputs, &labels, &xcoords);
1044  STRING text = DecodeLabels(labels);
1045  tprintf("Iteration %d: ALIGNED TRUTH : %s\n",
1046  training_iteration(), text.string());
1047  if (debug_interval_ > 0 && training_iteration() % debug_interval_ == 0) {
1048  tprintf("TRAINING activation path for truth string %s\n",
1049  truth_text.string());
1050  DebugActivationPath(outputs, labels, xcoords);
1051  DisplayForward(inputs, labels, xcoords, "LSTMTraining", &align_win_);
1052  if (OutputLossType() == LT_CTC) {
1053  DisplayTargets(fwd_outputs, "CTC Outputs", &ctc_win_);
1054  DisplayTargets(outputs, "CTC Targets", &target_win_);
1055  }
1056  }
1057  }
1058  return true;
1059 }
1060 
1061 // Displays the network targets as a line graph.
1062 void LSTMTrainer::DisplayTargets(const NetworkIO& targets,
1063  const char* window_name, ScrollView** window) {
1064 #ifndef GRAPHICS_DISABLED // do nothing if there's no graphics.
1065  int width = targets.Width();
1066  int num_features = targets.NumFeatures();
1067  Network::ClearWindow(true, window_name, width * kTargetXScale, kTargetYScale,
1068  window);
1069  for (int c = 0; c < num_features; ++c) {
1070  int color = c % (ScrollView::GREEN_YELLOW - 1) + 2;
1071  (*window)->Pen(static_cast<ScrollView::Color>(color));
1072  int start_t = -1;
1073  for (int t = 0; t < width; ++t) {
1074  double target = targets.f(t)[c];
1075  target *= kTargetYScale;
1076  if (target >= 1) {
1077  if (start_t < 0) {
1078  (*window)->SetCursor(t - 1, 0);
1079  start_t = t;
1080  }
1081  (*window)->DrawTo(t, target);
1082  } else if (start_t >= 0) {
1083  (*window)->DrawTo(t, 0);
1084  (*window)->DrawTo(start_t - 1, 0);
1085  start_t = -1;
1086  }
1087  }
1088  if (start_t >= 0) {
1089  (*window)->DrawTo(width, 0);
1090  (*window)->DrawTo(start_t - 1, 0);
1091  }
1092  }
1093  (*window)->Update();
1094 #endif // GRAPHICS_DISABLED
1095 }
1096 
1097 // Builds a no-compromises target where the first positions should be the
1098 // truth labels and the rest is padded with the null_char_.
1099 bool LSTMTrainer::ComputeTextTargets(const NetworkIO& outputs,
1100  const GenericVector<int>& truth_labels,
1101  NetworkIO* targets) {
1102  if (truth_labels.size() > targets->Width()) {
1103  tprintf("Error: transcription %s too long to fit into target of width %d\n",
1104  DecodeLabels(truth_labels).string(), targets->Width());
1105  return false;
1106  }
1107  for (int i = 0; i < truth_labels.size() && i < targets->Width(); ++i) {
1108  targets->SetActivations(i, truth_labels[i], 1.0);
1109  }
1110  for (int i = truth_labels.size(); i < targets->Width(); ++i) {
1111  targets->SetActivations(i, null_char_, 1.0);
1112  }
1113  return true;
1114 }
1115 
1116 // Builds a target using standard CTC. truth_labels should be pre-padded with
1117 // nulls wherever desired. They don't have to be between all labels.
1118 // outputs is input-output, as it gets clipped to minimum probability.
1119 bool LSTMTrainer::ComputeCTCTargets(const GenericVector<int>& truth_labels,
1120  NetworkIO* outputs, NetworkIO* targets) {
1121  // Bottom-clip outputs to a minimum probability.
1122  CTC::NormalizeProbs(outputs);
1123  return CTC::ComputeCTCTargets(truth_labels, null_char_,
1124  outputs->float_array(), targets);
1125 }
1126 
1127 // Computes network errors, and stores the results in the rolling buffers,
1128 // along with the supplied text_error.
1129 // Returns the delta error of the current sample (not running average.)
1130 double LSTMTrainer::ComputeErrorRates(const NetworkIO& deltas,
1131  double char_error, double word_error) {
1132  UpdateErrorBuffer(ComputeRMSError(deltas), ET_RMS);
1133  // Delta error is the fraction of timesteps with >0.5 error in the top choice
1134  // score. If zero, then the top choice characters are guaranteed correct,
1135  // even when there is residue in the RMS error.
1136  double delta_error = ComputeWinnerError(deltas);
1137  UpdateErrorBuffer(delta_error, ET_DELTA);
1138  UpdateErrorBuffer(word_error, ET_WORD_RECERR);
1139  UpdateErrorBuffer(char_error, ET_CHAR_ERROR);
1140  // Skip ratio measures the difference between sample_iteration_ and
1141  // training_iteration_, which reflects the number of unusable samples,
1142  // usually due to unencodable truth text, or the text not fitting in the
1143  // space for the output.
1144  double skip_count = sample_iteration_ - prev_sample_iteration_;
1145  UpdateErrorBuffer(skip_count, ET_SKIP_RATIO);
1146  return delta_error;
1147 }
1148 
1149 // Computes the network activation RMS error rate.
1150 double LSTMTrainer::ComputeRMSError(const NetworkIO& deltas) {
1151  double total_error = 0.0;
1152  int width = deltas.Width();
1153  int num_classes = deltas.NumFeatures();
1154  for (int t = 0; t < width; ++t) {
1155  const float* class_errs = deltas.f(t);
1156  for (int c = 0; c < num_classes; ++c) {
1157  double error = class_errs[c];
1158  total_error += error * error;
1159  }
1160  }
1161  return sqrt(total_error / (width * num_classes));
1162 }
1163 
1164 // Computes network activation winner error rate. (Number of values that are
1165 // in error by >= 0.5 divided by number of time-steps.) More closely related
1166 // to final character error than RMS, but still directly calculable from
1167 // just the deltas. Because of the binary nature of the targets, zero winner
1168 // error is a sufficient but not necessary condition for zero char error.
1169 double LSTMTrainer::ComputeWinnerError(const NetworkIO& deltas) {
1170  int num_errors = 0;
1171  int width = deltas.Width();
1172  int num_classes = deltas.NumFeatures();
1173  for (int t = 0; t < width; ++t) {
1174  const float* class_errs = deltas.f(t);
1175  for (int c = 0; c < num_classes; ++c) {
1176  float abs_delta = fabs(class_errs[c]);
1177  // TODO(rays) Filtering cases where the delta is very large to cut out
1178  // GT errors doesn't work. Find a better way or get better truth.
1179  if (0.5 <= abs_delta)
1180  ++num_errors;
1181  }
1182  }
1183  return static_cast<double>(num_errors) / width;
1184 }
1185 
1186 // Computes a very simple bag of chars char error rate.
1187 double LSTMTrainer::ComputeCharError(const GenericVector<int>& truth_str,
1188  const GenericVector<int>& ocr_str) {
1189  GenericVector<int> label_counts;
1190  label_counts.init_to_size(NumOutputs(), 0);
1191  int truth_size = 0;
1192  for (int i = 0; i < truth_str.size(); ++i) {
1193  if (truth_str[i] != null_char_) {
1194  ++label_counts[truth_str[i]];
1195  ++truth_size;
1196  }
1197  }
1198  for (int i = 0; i < ocr_str.size(); ++i) {
1199  if (ocr_str[i] != null_char_) {
1200  --label_counts[ocr_str[i]];
1201  }
1202  }
1203  int char_errors = 0;
1204  for (int i = 0; i < label_counts.size(); ++i) {
1205  char_errors += abs(label_counts[i]);
1206  }
1207  if (truth_size == 0) {
1208  return (char_errors == 0) ? 0.0 : 1.0;
1209  }
1210  return static_cast<double>(char_errors) / truth_size;
1211 }
1212 
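// Worked example: truth labels for "cat" against OCR labels for "cot" leave
// label_counts with +1 for 'a' and -1 for 'o' (and 0 for 'c' and 't'), so
// char_errors == 2 and the returned rate is 2/3. Being a bag of chars, the
// measure ignores ordering entirely.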
1213 // Computes word recall error rate using a very simple bag of words algorithm.
1214 // NOTE that this is destructive on both input strings.
1215 double LSTMTrainer::ComputeWordError(STRING* truth_str, STRING* ocr_str) {
1216  using StrMap = std::unordered_map<std::string, int, std::hash<std::string>>;
1217  GenericVector<STRING> truth_words, ocr_words;
1218  truth_str->split(' ', &truth_words);
1219  if (truth_words.empty()) return 0.0;
1220  ocr_str->split(' ', &ocr_words);
1221  StrMap word_counts;
1222  for (int i = 0; i < truth_words.size(); ++i) {
1223  std::string truth_word(truth_words[i].string());
1224  StrMap::iterator it = word_counts.find(truth_word);
1225  if (it == word_counts.end())
1226  word_counts.insert(std::make_pair(truth_word, 1));
1227  else
1228  ++it->second;
1229  }
1230  for (int i = 0; i < ocr_words.size(); ++i) {
1231  std::string ocr_word(ocr_words[i].string());
1232  StrMap::iterator it = word_counts.find(ocr_word);
1233  if (it == word_counts.end())
1234  word_counts.insert(std::make_pair(ocr_word, -1));
1235  else
1236  --it->second;
1237  }
1238  int word_recall_errs = 0;
1239  for (StrMap::const_iterator it = word_counts.begin(); it != word_counts.end();
1240  ++it) {
1241  if (it->second > 0) word_recall_errs += it->second;
1242  }
1243  return static_cast<double>(word_recall_errs) / truth_words.size();
1244 }
1245 
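// Worked example: truth "the cat sat" against ocr "the sat" leaves a positive
// count of +1 only for "cat", so the word recall error is 1/3. Only positive
// residues (truth words not matched by OCR words) are counted; spurious OCR
// words do not add to this measure.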
1246 // Updates the error buffer and corresponding mean of the given type with
1247 // the new_error.
1248 void LSTMTrainer::UpdateErrorBuffer(double new_error, ErrorTypes type) {
1249  int index = training_iteration_ % kRollingBufferSize_;
1250  error_buffers_[type][index] = new_error;
1251  // Compute the mean error.
1252  int mean_count = std::min(training_iteration_ + 1, error_buffers_[type].size());
1253  double buffer_sum = 0.0;
1254  for (int i = 0; i < mean_count; ++i) buffer_sum += error_buffers_[type][i];
1255  double mean = buffer_sum / mean_count;
1256  // Trim precision to 1/1000 of 1%.
1257  error_rates_[type] = IntCastRounded(100000.0 * mean) / 1000.0;
1258 }
1259 
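// Worked example: with a buffer mean of 0.123456 the stored rate becomes
// IntCastRounded(100000.0 * 0.123456) / 1000.0 == 12.346, i.e. a percentage
// trimmed to 1/1000 of 1%.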
1260 // Rolls error buffers and reports the current means.
1261 void LSTMTrainer::RollErrorBuffers() {
1262  prev_sample_iteration_ = sample_iteration_;
1263  if (NewSingleError(ET_DELTA) > 0.0)
1264  ++learning_iteration_;
1265  else
1266  last_perfect_training_iteration_ = training_iteration_;
1267  ++training_iteration_;
1268  if (debug_interval_ != 0) {
1269  tprintf("Mean rms=%g%%, delta=%g%%, train=%g%%(%g%%), skip ratio=%g%%\n",
1270  error_rates_[ET_RMS], error_rates_[ET_DELTA],
1271  error_rates_[ET_CHAR_ERROR], error_rates_[ET_WORD_RECERR],
1272  error_rates_[ET_SKIP_RATIO]);
1273  }
1274 }
1275 
1276 // Given that error_rate is either a new min or max, updates the best/worst
1277 // error rates, and record of progress.
1278 // Tester is an externally supplied callback function that tests on some
1279 // data set with a given model and records the error rates in a graph.
1280 STRING LSTMTrainer::UpdateErrorGraph(int iteration, double error_rate,
1281  const GenericVector<char>& model_data,
1282  TestCallback tester) {
1283  if (error_rate > best_error_rate_
1284  && iteration < best_iteration_ + kErrorGraphInterval) {
1285  // Too soon to record a new point.
1286  if (tester != nullptr && !worst_model_data_.empty()) {
1287  mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0],
1288  worst_model_data_.size());
1289  return tester->Run(worst_iteration_, nullptr, mgr_, CurrentTrainingStage());
1290  } else {
1291  return "";
1292  }
1293  }
1294  STRING result;
1295  // NOTE: there are 2 asymmetries here:
1296  // 1. We are computing the global minimum, but the local maximum in between.
1297  // 2. If the tester returns an empty string, indicating that it is busy,
1298  // call it repeatedly on new local maxima to test the previous min, but
1299  // not the other way around, as there is little point testing the maxima
1300  // between very frequent minima.
1301  if (error_rate < best_error_rate_) {
1302  // This is a new (global) minimum.
1303  if (tester != nullptr && !worst_model_data_.empty()) {
1304  mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0],
1305  worst_model_data_.size());
1306  result = tester->Run(worst_iteration_, worst_error_rates_, mgr_,
1307  CurrentTrainingStage());
1308  worst_model_data_.truncate(0);
1309  best_model_data_ = model_data;
1310  }
1311  best_error_rate_ = error_rate;
1312  memcpy(best_error_rates_, error_rates_, sizeof(error_rates_));
1313  best_iteration_ = iteration;
1314  best_error_history_.push_back(error_rate);
1315  best_error_iterations_.push_back(iteration);
1316  // Compute 2% decay time.
1317  double two_percent_more = error_rate + 2.0;
1318  int i;
1319  for (i = best_error_history_.size() - 1;
1320  i >= 0 && best_error_history_[i] < two_percent_more; --i) {
1321  }
1322  int old_iteration = i >= 0 ? best_error_iterations_[i] : 0;
1323  improvement_steps_ = iteration - old_iteration;
1324  tprintf("2 Percent improvement time=%d, best error was %g @ %d\n",
1325  improvement_steps_, i >= 0 ? best_error_history_[i] : 100.0,
1326  old_iteration);
1327  } else if (error_rate > best_error_rate_) {
1328  // This is a new (local) maximum.
1329  if (tester != nullptr) {
1330  if (!best_model_data_.empty()) {
1331  mgr_.OverwriteEntry(TESSDATA_LSTM, &best_model_data_[0],
1332  best_model_data_.size());
1333  result = tester->Run(best_iteration_, best_error_rates_, mgr_,
1334  CurrentTrainingStage());
1335  } else if (!worst_model_data_.empty()) {
1336  // Allow for multiple data points with "worst" error rate.
1337  mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0],
1338  worst_model_data_.size());
1339  result = tester->Run(worst_iteration_, worst_error_rates_, mgr_,
1340  CurrentTrainingStage());
1341  }
1342  if (result.length() > 0)
1343  best_model_data_.truncate(0);
1344  worst_model_data_ = model_data;
1345  }
1346  }
1347  worst_error_rate_ = error_rate;
1348  memcpy(worst_error_rates_, error_rates_, sizeof(error_rates_));
1349  worst_iteration_ = iteration;
1350  return result;
1351 }
1352 
1353 } // namespace tesseract.