// Computes the base-2 logarithm of n via the natural log identity
// log2(n) = ln(n) / ln(2).  Local fallback helper; note it shadows
// std::log2 where the compiler provides C++11 <cmath> — TODO confirm
// this is intended for the targeted toolchains.
47 static inline double log2(
double n) {
48 return log(n) / log(2.0);
56 :
INT_MEMBER(language_model_debug_level, 0,
"Language model debug level",
57 dict->getCCUtil()->params()),
59 "Turn on/off the use of character ngram model",
60 dict->getCCUtil()->params()),
62 "Maximum order of the character ngram model",
63 dict->getCCUtil()->params()),
64 INT_MEMBER(language_model_viterbi_list_max_num_prunable, 10,
65 "Maximum number of prunable (those for which" 66 " PrunablePath() is true) entries in each viterbi list" 67 " recorded in BLOB_CHOICEs",
68 dict->getCCUtil()->params()),
69 INT_MEMBER(language_model_viterbi_list_max_size, 500,
70 "Maximum size of viterbi lists recorded in BLOB_CHOICEs",
71 dict->getCCUtil()->params()),
73 "To avoid overly small denominators use this as the " 74 "floor of the probability returned by the ngram model.",
75 dict->getCCUtil()->params()),
77 "Average classifier score of a non-matching unichar.",
78 dict->getCCUtil()->params()),
79 BOOL_MEMBER(language_model_ngram_use_only_first_uft8_step, false,
80 "Use only the first UTF8 step of the given string" 81 " when computing log probabilities.",
82 dict->getCCUtil()->params()),
84 "Strength of the character ngram model relative to the" 85 " character classifier ",
86 dict->getCCUtil()->params()),
88 "Factor to bring log-probs into the same range as ratings" 89 " when multiplied by outline length ",
90 dict->getCCUtil()->params()),
91 BOOL_MEMBER(language_model_ngram_space_delimited_language, true,
92 "Words are delimited by space", dict->getCCUtil()->params()),
93 INT_MEMBER(language_model_min_compound_length, 3,
94 "Minimum length of compound words",
95 dict->getCCUtil()->params()),
97 "Penalty for words not in the frequent word dictionary",
98 dict->getCCUtil()->params()),
100 "Penalty for non-dictionary words",
101 dict->getCCUtil()->params()),
103 "Penalty for inconsistent punctuation",
104 dict->getCCUtil()->params()),
106 "Penalty for inconsistent case",
107 dict->getCCUtil()->params()),
109 "Penalty for inconsistent script",
110 dict->getCCUtil()->params()),
112 "Penalty for inconsistent character type",
113 dict->getCCUtil()->params()),
117 "Penalty for inconsistent font",
118 dict->getCCUtil()->params()),
120 "Penalty for inconsistent spacing",
121 dict->getCCUtil()->params()),
122 double_MEMBER(language_model_penalty_increment, 0.01,
"Penalty increment",
123 dict->getCCUtil()->params()),
124 INT_MEMBER(wordrec_display_segmentations, 0,
"Display Segmentations",
125 dict->getCCUtil()->params()),
127 "Use sigmoidal score for certainty",
128 dict->getCCUtil()->params()),
130 fontinfo_table_(fontinfo_table),
133 max_char_wh_ratio_(0.0),
134 acceptable_choice_found_(false) {
141 bool fixed_pitch,
float max_char_wh_ratio,
142 float rating_cert_scale) {
158 if (prev_word !=
nullptr && prev_word->
unichar_string() !=
nullptr) {
180 static void ScanParentsForCaseMix(
const UNICHARSET& unicharset,
182 if (parent_node ==
nullptr)
return;
184 for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
191 if (other_case == unichar_id)
continue;
197 for (vit2.mark_cycle_pt(); !vit2.cycled_list() &&
198 vit2.data()->curr_b->unichar_id() != other_case;
200 if (!vit2.cycled_list()) {
211 static bool HasBetterCaseVariant(
const UNICHARSET& unicharset,
213 BLOB_CHOICE_LIST* choices) {
216 if (other_case == choice_id || other_case == INVALID_UNICHAR_ID)
220 BLOB_CHOICE_IT bc_it(choices);
221 for (bc_it.mark_cycle_pt(); !bc_it.cycled_list(); bc_it.forward()) {
223 if (better_choice->
unichar_id() == other_case)
225 else if (better_choice == choice)
258 bool just_classified,
259 int curr_col,
int curr_row,
260 BLOB_CHOICE_LIST *curr_list,
267 tprintf(
"\nUpdateState: col=%d row=%d %s",
268 curr_col, curr_row, just_classified ?
"just_classified" :
"");
270 tprintf(
"(parent=%p)\n", parent_node);
276 bool new_changed =
false;
282 bool has_alnum_mix =
false;
283 if (parent_node !=
nullptr) {
287 tprintf(
"No parents found to process\n");
291 has_alnum_mix =
true;
295 has_alnum_mix =
false;;
296 ScanParentsForCaseMix(unicharset, parent_node);
298 parent_node->
Print(
"Parent viterbi list");
303 ViterbiStateEntry_IT vit;
304 BLOB_CHOICE_IT c_it(curr_list);
305 for (c_it.mark_cycle_pt(); !c_it.cycled_list(); c_it.forward()) {
317 if (c_it.at_first() || !new_changed)
321 if (first_digit == choice) blob_choice_flags |=
kDigitFlag;
323 if (parent_node ==
nullptr) {
335 if (HasBetterCaseVariant(unicharset, choice, curr_list))
341 blob_choice_flags, denom, word_end, curr_col, curr_row,
342 choice, curr_state,
nullptr, pain_points,
343 word_res, best_choice_bundle, blamer_bundle);
352 c_it.data(), blob_choice_flags,
353 unicharset, word_res, &vit,
354 &top_choice_flags)) !=
nullptr) {
367 HasBetterCaseVariant(unicharset, choice, curr_list))
372 top_choice_flags, denom, word_end, curr_col, curr_row,
373 c_it.data(), curr_state, parent_vse, pain_points,
374 word_res, best_choice_bundle, blamer_bundle);
391 BLOB_CHOICE_IT c_it(curr_list);
394 for (c_it.mark_cycle_pt(); !c_it.cycled_list(); c_it.forward()) {
397 if (first_unichar ==
nullptr) first_unichar = c_it.data();
398 if (*first_lower ==
nullptr && unicharset.
get_islower(unichar_id)) {
399 *first_lower = c_it.data();
401 if (*first_upper ==
nullptr && unicharset.
get_isalpha(unichar_id) &&
403 *first_upper = c_it.data();
405 if (*first_digit ==
nullptr && unicharset.
get_isdigit(unichar_id)) {
406 *first_digit = c_it.data();
410 bool mixed = (*first_lower !=
nullptr || *first_upper !=
nullptr) &&
411 *first_digit !=
nullptr;
412 if (*first_lower ==
nullptr) *first_lower = first_unichar;
413 if (*first_upper ==
nullptr) *first_upper = first_unichar;
414 if (*first_digit ==
nullptr) *first_digit = first_unichar;
429 if (parent_node ==
nullptr)
return -1;
435 float lower_rating = 0.0f;
436 float upper_rating = 0.0f;
437 float digit_rating = 0.0f;
438 float top_rating = 0.0f;
441 for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
448 while (unichar_id == INVALID_UNICHAR_ID &&
454 if (unichar_id != INVALID_UNICHAR_ID) {
456 if (top_lower ==
nullptr || lower_rating > rating) {
458 lower_rating = rating;
461 if (top_upper ==
nullptr || upper_rating > rating) {
463 upper_rating = rating;
466 if (top_digit ==
nullptr || digit_rating > rating) {
468 digit_rating = rating;
472 if (top_choice ==
nullptr || top_rating > rating) {
478 if (top_choice ==
nullptr)
return -1;
479 bool mixed = (top_lower !=
nullptr || top_upper !=
nullptr) &&
480 top_digit !=
nullptr;
481 if (top_lower ==
nullptr) top_lower = top_choice;
483 if (top_upper ==
nullptr) top_upper = top_choice;
485 if (top_digit ==
nullptr) top_digit = top_choice;
496 return mixed ? 1 : 0;
505 bool just_classified,
bool mixed_alnum,
const BLOB_CHOICE* bc,
507 WERD_RES* word_res, ViterbiStateEntry_IT* vse_it,
509 for (; !vse_it->cycled_list(); vse_it->forward()) {
513 if (!just_classified && !parent_vse->
updated)
continue;
515 parent_vse->
Print(
"Considering");
517 *top_choice_flags = blob_choice_flags;
530 (mixed_alnum || *top_choice_flags == 0))
536 (mixed_alnum || *top_choice_flags == 0))
545 tprintf(
"Parent %s has competition %s\n",
569 int curr_col,
int curr_row,
577 ViterbiStateEntry_IT vit;
579 tprintf(
"AddViterbiStateEntry for unichar %s rating=%.4f" 580 " certainty=%.4f top_choice_flags=0x%x",
584 tprintf(
" parent_vse=%p\n", parent_vse);
593 tprintf(
"AddViterbiStateEntry: viterbi list is full!\n");
602 float outline_length =
609 denom, curr_col, curr_row, outline_length, parent_vse);
612 bool liked_by_language_model = dawg_info !=
nullptr ||
613 (ngram_info !=
nullptr && !ngram_info->
pruned);
616 if (!liked_by_language_model && top_choice_flags == 0) {
618 tprintf(
"Language model components very early pruned this entry\n");
639 if (!liked_by_language_model && top_choice_flags == 0) {
641 tprintf(
"Language model components early pruned this entry\n");
650 word_res, &consistency_info);
651 if (dawg_info !=
nullptr && consistency_info.
invalid_punc) {
658 parent_vse, word_res, &associate_stats);
659 if (parent_vse !=
nullptr) {
666 parent_vse, b, 0.0, outline_length,
667 consistency_info, associate_stats, top_choice_flags, dawg_info,
688 tprintf(
"Language model components did not like this entry\n");
702 tprintf(
"Discarded ViterbiEntry with high cost %g max cost %g\n",
713 best_choice_bundle, blamer_bundle);
716 new_vse != best_choice_bundle->
best_vse) {
718 tprintf(
"Discarded ViterbiEntry with high cost %g\n", new_vse->
cost);
741 for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
750 if (prunable_counter > 0 &&
PrunablePath(*curr_vse)) --prunable_counter;
752 if (prunable_counter == 0) {
755 tprintf(
"Set viterbi_state_entries_prunable_max_cost to %g\n",
758 prunable_counter = -1;
765 new_vse->
Print(
"New");
767 curr_state->
Print(
"Updated viterbi list");
777 for (vit.mark_cycle_pt(); !vit.cycled_list() && new_vse->
top_choice_flags &&
778 new_vse->
cost >= vit.data()->cost; vit.forward()) {
784 tprintf(
"GenerateTopChoiceInfo: top_choice_flags=0x%x\n",
791 int curr_col,
int curr_row,
796 if (parent_vse ==
nullptr) {
800 if (parent_vse->
dawg_info ==
nullptr)
return nullptr;
819 if (parent_vse ==
nullptr || word_end ||
826 bool has_word_ending =
false;
834 has_word_ending =
true;
838 if (!has_word_ending)
return nullptr;
852 for (
int i = 0; i < normed_ids.
size(); ++i) {
854 tprintf(
"Test Letter OK for unichar %d, normed %d\n",
857 word_end && i == normed_ids.
size() - 1);
860 }
else if (i < normed_ids.
size() - 1) {
865 tprintf(
"Letter was OK for unichar %d, normed %d\n",
881 const char *unichar,
float certainty,
float denom,
882 int curr_col,
int curr_row,
float outline_length,
885 const char *pcontext_ptr =
"";
886 int pcontext_unichar_step_len = 0;
887 if (parent_vse ==
nullptr) {
892 pcontext_unichar_step_len =
896 int unichar_step_len = 0;
899 float ngram_and_classifier_cost =
901 pcontext_ptr, &unichar_step_len,
902 &pruned, &ngram_cost);
906 ngram_and_classifier_cost *=
909 if (parent_vse !=
nullptr) {
910 ngram_and_classifier_cost +=
916 int num_remove = (unichar_step_len + pcontext_unichar_step_len -
918 if (num_remove > 0) pcontext_unichar_step_len -= num_remove;
919 while (num_remove > 0 && *pcontext_ptr !=
'\0') {
929 pcontext_ptr, pcontext_unichar_step_len, pruned, ngram_cost,
930 ngram_and_classifier_cost);
931 ngram_info->
context += unichar;
941 int *unichar_step_len,
942 bool *found_small_prob,
944 const char *context_ptr = context;
945 char *modified_context =
nullptr;
946 char *modified_context_end =
nullptr;
947 const char *unichar_ptr = unichar;
948 const char *unichar_end = unichar_ptr + strlen(unichar_ptr);
951 while (unichar_ptr < unichar_end &&
954 tprintf(
"prob(%s | %s)=%g\n", unichar_ptr, context_ptr,
958 ++(*unichar_step_len);
964 if (unichar_ptr < unichar_end) {
965 if (modified_context ==
nullptr) {
966 size_t context_len = strlen(context);
968 new char[context_len + strlen(unichar_ptr) + step + 1];
969 memcpy(modified_context, context, context_len);
970 modified_context_end = modified_context + context_len;
971 context_ptr = modified_context;
973 strncpy(modified_context_end, unichar_ptr - step, step);
974 modified_context_end += step;
975 *modified_context_end =
'\0';
978 prob /=
static_cast<float>(*unichar_step_len);
981 *found_small_prob =
true;
984 *ngram_cost = -1.0*log2(prob);
985 float ngram_and_classifier_cost =
989 tprintf(
"-log [ p(%s) * p(%s | %s) ] = -log2(%g*%g) = %g\n", unichar,
991 ngram_and_classifier_cost);
993 delete[] modified_context;
994 return ngram_and_classifier_cost;
998 if (curr_list->empty())
return 1.0f;
1001 BLOB_CHOICE_IT c_it(curr_list);
1002 for (c_it.mark_cycle_pt(); !c_it.cycled_list(); c_it.forward()) {
1036 consistency_info->
punc_ref = NO_EDGE;
1039 bool prev_is_numalpha = (parent_b !=
nullptr &&
1045 (is_apos && prev_is_numalpha)) ?
1047 if (consistency_info->
punc_ref == NO_EDGE ||
1055 node, pattern_unichar_id, word_end) : NO_EDGE;
1056 if (consistency_info->
punc_ref == NO_EDGE) {
1071 }
else if ((parent_b !=
nullptr) && unicharset.
get_isupper(unichar_id)) {
1094 if (parent_vse !=
nullptr &&
1100 consistency_info->
script_id = parent_script_id;
1102 if (consistency_info->
script_id != parent_script_id) {
1118 int fontinfo_id = -1;
1127 tprintf(
"pfont %s pfont %s font %s font2 %s common %s(%d)\n",
1139 bool expected_gap_found =
false;
1140 float expected_gap = 0.0f;
1142 if (fontinfo_id >= 0) {
1143 ASSERT_HOST(fontinfo_id < fontinfo_table_->size());
1145 parent_b->
unichar_id(), unichar_id, &temp_gap)) {
1146 expected_gap = temp_gap;
1147 expected_gap_found =
true;
1152 int num_addends = 0;
1154 for (
int i = 0; i < 4; ++i) {
1157 }
else if (i == 1) {
1159 }
else if (i == 2) {
1164 ASSERT_HOST(temp_fid < 0 || fontinfo_table_->size());
1166 parent_b->
unichar_id(), unichar_id, &temp_gap)) {
1167 expected_gap += temp_gap;
1171 if (num_addends > 0) {
1172 expected_gap /=
static_cast<float>(num_addends);
1173 expected_gap_found =
true;
1176 if (expected_gap_found) {
1178 static_cast<float>(word_res->
GetBlobsGap(curr_col-1));
1179 float gap_ratio = expected_gap / actual_gap;
1185 if (gap_ratio < 0.0f || gap_ratio > 2.0f) {
1189 tprintf(
"spacing for %s(%d) %s(%d) col %d: expected %g actual %g\n",
1192 unichar_id, curr_col, expected_gap, actual_gap);
1206 tprintf(
"ComputeAdjustedPathCost %g ParamsModel features:\n", cost);
1209 tprintf(
"%s=%g\n", kParamsTrainingFeatureTypeName[f], features[f]);
1215 float adjustment = 1.0f;
1228 static_cast<float>(vse->
length);
1249 blamer_bundle, &truth_path);
1257 word->
print(
"UpdateBestChoice() constructed word");
1261 if (blamer_bundle !=
nullptr) {
1268 tprintf(
"Raw features extracted from %s (cost=%g) [ ",
1292 tprintf(
"Updated raw choice\n");
1316 best_choice_bundle->
updated =
true;
1317 best_choice_bundle->
best_vse = vse;
1319 tprintf(
"Updated best choice\n");
1331 if (blamer_bundle !=
nullptr) {
1345 int len = vse.
length <= kMaxSmallWordUnichars ? 0 :
1346 vse.
length <= kMaxMediumWordUnichars ? 1 : 2;
1396 if (truth_path !=
nullptr) {
1398 (blamer_bundle !=
nullptr &&
1409 float full_wh_ratio_mean = 0.0f;
1413 static_cast<float>(vse->
length));
1420 int total_blobs = 0;
1421 for (i = (vse->
length-1); i >= 0; --i) {
1422 if (blamer_bundle !=
nullptr && truth_path !=
nullptr && *truth_path &&
1424 *truth_path =
false;
1428 total_blobs += num_blobs;
1433 if ((full_wh_ratio_mean != 0.0f &&
1434 ((curr_vse != vse && curr_vse->
parent_vse !=
nullptr) ||
1439 tprintf(
"full_wh_ratio_var += (%g-%g)^2\n",
1452 if (curr_vse ==
nullptr)
break;
1453 curr_b = curr_vse->
curr_b;
1458 if (full_wh_ratio_mean != 0.0f) {
int viterbi_state_entries_prunable_length
Number and max cost of prunable paths in viterbi_state_entries.
bool GuidedSegsearchStillGoing() const
void UpdateBestChoice(ViterbiStateEntry *vse, LMPainPoints *pain_points, WERD_RES *word_res, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle)
void FillConsistencyInfo(int curr_col, bool word_end, BLOB_CHOICE *b, ViterbiStateEntry *parent_vse, WERD_RES *word_res, LMConsistencyInfo *consistency_info)
static const float kBadRating
bool get_islower(UNICHAR_ID unichar_id) const
float CertaintyScore(float cert)
bool acceptable_choice_found_
float ngram_and_classifier_cost
-[ ln(P_classifier(path)) + scale_factor * ln(P_ngram_model(path)) ]
int language_model_viterbi_list_max_num_prunable
bool get_ispunctuation(UNICHAR_ID unichar_id) const
static int Compare(const void *e1, const void *e2)
const Dawg * GetPuncDawg() const
Return a pointer to the punctuation dawg.
void set_best_choice_is_dict_and_top_choice(bool value)
AssociateStats associate_stats
LanguageModelDawgInfo * GenerateDawgInfo(bool word_end, int curr_col, int curr_row, const BLOB_CHOICE &b, const ViterbiStateEntry *parent_vse)
float full_wh_ratio_total
BLOB_CHOICE * curr_b
Pointers to BLOB_CHOICE and parent ViterbiStateEntry (not owned by this).
float features[PTRAIN_NUM_FEATURE_TYPES]
int NumInconsistentSpaces() const
int prev_word_unichar_step_len_
void reset_hyphen_vars(bool last_word_on_line)
void Print(const char *msg) const
GenericVector< int > blob_widths
bool language_model_ngram_space_delimited_language
void adjust_word(WERD_CHOICE *word, bool nonword, XHeightConsistencyEnum xheight_consistency, float additional_adjust, bool modify_rating, bool debug)
Adjusts the rating of the given word.
int GetBlobsGap(int blob_index)
LanguageModelNgramInfo * GenerateNgramInfo(const char *unichar, float certainty, float denom, int curr_col, int curr_row, float outline_length, const ViterbiStateEntry *parent_vse)
virtual UNICHAR_ID edge_letter(EDGE_REF edge_ref) const =0
Returns UNICHAR_ID stored in the edge indicated by the given EDGE_REF.
const char * string() const
int tessedit_truncate_wordchoice_log
static const LanguageModelFlagsType kXhtConsistentFlag
ViterbiStateEntry * competing_vse
bool correct_segmentation_explored_
#define BOOL_INIT_MEMBER(name, val, comment, vec)
void init_active_dawgs(DawgPositionVector *active_dawgs, bool ambigs_mode) const
int viterbi_state_entries_length
Total number of entries in viterbi_state_entries.
int SetTopParentLowerUpperDigit(LanguageModelState *parent_node) const
int context_unichar_step_len
int language_model_min_compound_length
bool AcceptableChoice(const WERD_CHOICE &best_choice, XHeightConsistencyEnum xheight_consistency)
Returns true if the given best_choice is good enough to stop.
Struct to store information maintained by various language model components.
#define INT_MEMBER(name, val, comment, vec)
void InitForWord(const WERD_CHOICE *prev_word, bool fixed_pitch, float max_char_wh_ratio, float rating_cert_scale)
int InconsistentXHeight() const
void Print(const char *msg)
const GenericVector< UNICHAR_ID > & normed_ids(UNICHAR_ID unichar_id) const
void ComputeAssociateStats(int col, int row, float max_char_wh_ratio, ViterbiStateEntry *parent_vse, WERD_RES *word_res, AssociateStats *associate_stats)
static void ExtractFeaturesFromPath(const ViterbiStateEntry &vse, float features[])
bool get_isalpha(UNICHAR_ID unichar_id) const
double language_model_ngram_scale_factor
#define BOOL_MEMBER(name, val, comment, vec)
int NumInconsistentCase() const
bool AddViterbiStateEntry(LanguageModelFlagsType top_choice_flags, float denom, bool word_end, int curr_col, int curr_row, BLOB_CHOICE *b, LanguageModelState *curr_state, ViterbiStateEntry *parent_vse, LMPainPoints *pain_points, WERD_RES *word_res, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle)
int16_t fontinfo_id() const
static const LanguageModelFlagsType kSmallestRatingFlag
static const LanguageModelFlagsType kDigitFlag
LanguageModelNgramInfo * ngram_info
double language_model_penalty_non_freq_dict_word
ViterbiStateEntry_LIST viterbi_state_entries
Storage for the Viterbi state.
float ComputeAdjustedPathCost(ViterbiStateEntry *vse)
unsigned char LanguageModelFlagsType
Used for expressing various language model flags.
double language_model_ngram_small_prob
float ComputeNgramCost(const char *unichar, float certainty, float denom, const char *context, int *unichar_step_len, bool *found_small_prob, float *ngram_prob)
static NODE_REF GetStartingNode(const Dawg *dawg, EDGE_REF edge_ref)
Returns the appropriate next node given the EDGE_REF.
float ComputeCost(const float features[]) const
PointerVector< LanguageModelState > beam
static const LanguageModelFlagsType kUpperCaseFlag
#define double_MEMBER(name, val, comment, vec)
const Dawg * GetDawg(int index) const
Return the i-th dawg pointer recorded in the dawgs_ vector.
ParamsModel params_model_
void print_state(const char *msg) const
void set_hyphen_word(const WERD_CHOICE &word, const DawgPositionVector &active_dawgs)
bool PosAndSizeAgree(const BLOB_CHOICE &other, float x_height, bool debug) const
bool get_isdigit(UNICHAR_ID unichar_id) const
float ngram_cost
-ln(P_ngram_model(path))
int num_inconsistent_spaces
int16_t fontinfo_id2() const
float ComputeConsistencyAdjustment(const LanguageModelDawgInfo *dawg_info, const LMConsistencyInfo &consistency_info)
void set_blob_choice(int index, int blob_count, const BLOB_CHOICE *blob_choice)
void DisplaySegmentation(TWERD *word)
DawgPositionVector beginning_active_dawgs_
bool GetTopLowerUpperDigit(BLOB_CHOICE_LIST *curr_list, BLOB_CHOICE **first_lower, BLOB_CHOICE **first_upper, BLOB_CHOICE **first_digit) const
virtual EDGE_REF edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, bool word_end) const =0
Returns the edge that corresponds to the letter out of this node.
int language_model_ngram_order
bool LogNewRawChoice(WERD_CHOICE *word_choice)
float ComputeDenom(BLOB_CHOICE_LIST *curr_list)
bool HasAlnumChoice(const UNICHARSET &unicharset)
void UpdateBestRating(float rating)
DawgPositionVector * updated_dawgs
double language_model_ngram_rating_factor
bool language_model_ngram_on
bool AcceptablePath(const ViterbiStateEntry &vse)
double language_model_penalty_increment
void GenerateTopChoiceInfo(ViterbiStateEntry *new_vse, const ViterbiStateEntry *parent_vse, LanguageModelState *lms)
DLLSYM void tprintf(const char *format,...)
DANGERR fixpt
Places to try to fix the word suggested by ambiguity checking.
const MATRIX_COORD & matrix_cell()
Bundle together all the things pertaining to the best choice/state.
float BodyMinXHeight() const
static int utf8_step(const char *utf8_str)
GenericVector< TBLOB * > blobs
void string_and_lengths(STRING *word_str, STRING *word_lengths_str) const
bool SizesDistinct(UNICHAR_ID id1, UNICHAR_ID id2) const
bool PrunablePath(const ViterbiStateEntry &vse)
LMConsistencyInfo consistency_info
const CHAR_FRAGMENT * get_fragment(UNICHAR_ID unichar_id) const
int wordrec_display_segmentations
double ProbabilityInContext(const char *context, int context_bytes, const char *character, int character_bytes)
Calls probability_in_context_ member function.
int language_model_debug_level
bool language_model_ngram_use_only_first_uft8_step
ViterbiStateEntry * GetNextParentVSE(bool just_classified, bool mixed_alnum, const BLOB_CHOICE *bc, LanguageModelFlagsType blob_choice_flags, const UNICHARSET &unicharset, WERD_RES *word_res, ViterbiStateEntry_IT *vse_it, LanguageModelFlagsType *top_choice_flags) const
static float ComputeOutlineLength(float rating_cert_scale, const BLOB_CHOICE &b)
ViterbiStateEntry * best_vse
Best ViterbiStateEntry and BLOB_CHOICE.
bool hyphenated() const
Returns true if we've recorded the beginning of a hyphenated word.
bool updated
Flag to indicate whether anything was changed.
const UNICHARSET * uch_set
LanguageModelFlagsType top_choice_flags
const char * id_to_unichar(UNICHAR_ID id) const
void set_x_heights(float min_height, float max_height)
const UnicityTable< FontInfo > * fontinfo_table_
DawgPositionVector * active_dawgs
const UNICHARSET & getUnicharset() const
const STRING & unichar_string() const
void set_dangerous_ambig_found_(bool value)
DawgPositionVector very_beginning_active_dawgs_
static const UNICHAR_ID kPatternUnicharID
double language_model_penalty_non_dict_word
bool is_apostrophe(UNICHAR_ID unichar_id)
static const LanguageModelFlagsType kLowerCaseFlag
void default_dawgs(DawgPositionVector *anylength_dawgs, bool suppress_patterns) const
double language_model_ngram_nonmatch_score
bool compound_marker(UNICHAR_ID unichar_id)
int NumInconsistentChartype() const
bool has_hyphen_end(UNICHAR_ID unichar_id, bool first_pos) const
Check whether the word has a hyphen at the end.
XHeightConsistencyEnum xht_decision
UNICHAR_ID get_other_case(UNICHAR_ID unichar_id) const
void SetScriptPositions(bool small_caps, TWERD *word, int debug=0)
bool get_isupper(UNICHAR_ID unichar_id) const
bool UpdateState(bool just_classified, int curr_col, int curr_row, BLOB_CHOICE_LIST *curr_list, LanguageModelState *parent_node, LMPainPoints *pain_points, WERD_RES *word_res, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle)
float viterbi_state_entries_prunable_max_cost
bool MatrixPositionCorrect(int index, const MATRIX_COORD &coord)
int language_model_viterbi_list_max_size
ViterbiStateEntry * parent_vse
float BodyMaxXHeight() const
WERD_CHOICE * ConstructWord(ViterbiStateEntry *vse, WERD_RES *word_res, DANGERR *fixpt, BlamerBundle *blamer_bundle, bool *truth_path)
UNICHAR_ID unichar_id() const
int correct_segmentation_length() const
LanguageModel(const UnicityTable< FontInfo > *fontinfo_table, Dict *dict)
WERD_CHOICE * best_choice
bool LogNewCookedChoice(int max_num_choices, bool debug, WERD_CHOICE *word_choice)
DawgPositionVector active_dawgs
static const float kMaxAvgNgramCost
int get_script(UNICHAR_ID unichar_id) const
void set_certainty(float new_val)
virtual bool end_of_word(EDGE_REF edge_ref) const =0
void ComputeXheightConsistency(const BLOB_CHOICE *b, bool is_punc)
LanguageModelDawgInfo * dawg_info
int LetterIsOkay(void *void_dawg_args, const UNICHARSET &unicharset, UNICHAR_ID unichar_id, bool word_end) const
Calls letter_is_okay_ member function.
bool NoDangerousAmbig(WERD_CHOICE *BestChoice, DANGERR *fixpt, bool fix_replaceable, MATRIX *ratings)
void set_rating(float new_val)
void set_permuter(uint8_t perm)
void AddHypothesis(const tesseract::ParamsTrainingHypothesis &hypo)