libjulius/include/julius/define.h


/*
 * Copyright (c) 1991-2007 Kawahara Lab., Kyoto University
 * Copyright (c) 2000-2005 Shikano Lab., Nara Institute of Science and Technology
 * Copyright (c) 2005-2007 Julius project team, Nagoya Institute of Technology
 * All rights reserved
 */

#ifndef __J_DEFINE_H__
#define __J_DEFINE_H__


/*****************************************************************************/
/*****************************************************************************/

/* type of language model */
#define LM_UNDEF 0
#define LM_PROB 1
#define LM_DFA 2

/* LM variation specification */
#define LM_NGRAM 0
#define LM_DFA_GRAMMAR 1
#define LM_DFA_WORD 2
#define LM_NGRAM_USER 3

/* recognition status */
#define J_RESULT_STATUS_REJECT_POWER -6
#define J_RESULT_STATUS_TERMINATE -5
#define J_RESULT_STATUS_ONLY_SILENCE -4
#define J_RESULT_STATUS_REJECT_GMM -3
#define J_RESULT_STATUS_REJECT_SHORT -2
#define J_RESULT_STATUS_FAIL -1
#define J_RESULT_STATUS_SUCCESS 0
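
/* Illustrative sketch (not part of Julius): mapping the J_RESULT_STATUS_*
   codes above to human-readable strings.  The function name is hypothetical;
   the strings are derived only from the macro names, and merely show how the
   codes defined here are meant to be interpreted. */
#if 0
static const char *
example_status_string(int status)
{
  switch (status) {
  case J_RESULT_STATUS_SUCCESS:      return "recognition succeeded";
  case J_RESULT_STATUS_FAIL:         return "search failed";
  case J_RESULT_STATUS_REJECT_SHORT: return "input rejected (too short)";
  case J_RESULT_STATUS_REJECT_GMM:   return "input rejected by GMM";
  case J_RESULT_STATUS_ONLY_SILENCE: return "input contained only silence";
  case J_RESULT_STATUS_TERMINATE:    return "recognition terminated";
  case J_RESULT_STATUS_REJECT_POWER: return "input rejected by power";
  default:                           return "unknown status";
  }
}
#endif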

/* remove incoherent option combinations */
/* CATEGORY_TREE: DFA=always on, NGRAM=always off */
/*   switch with recog->category_tree */
/* UNIGRAM_FACTORING: DFA=always off, NGRAM=option */
/*   enclose UNIGRAM_FACTORING sections with "if (lmtype == LM_NGRAM)" */
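
/* Illustrative sketch (not part of Julius): the guard pattern described in
   the comments above.  "lmtype" and "recog->category_tree" follow the
   comments; the called functions are hypothetical placeholders. */
#if 0
  if (lmtype == LM_NGRAM) {
#ifdef UNIGRAM_FACTORING
    /* unigram factoring applies only to N-gram LMs */
    compute_factoring_values(wchmm);   /* hypothetical call */
#endif
  }
  if (recog->category_tree) {
    /* category-constrained tree: always used for DFA, never for N-gram */
    build_category_tree(wchmm);        /* hypothetical call */
  }
#endif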

/* abbreviation for verbose message output */
#define VERMES if (verbose_flag) jlog
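
/* Usage sketch (not part of Julius): VERMES is a plain textual macro, so a
   call simply expands to a guarded jlog() call, as shown below.  Note that,
   being a bare "if", it should not be used as the body of another if/else
   without braces. */
#if 0
  VERMES("read %d words from dictionary\n", nword);
  /* expands to: if (verbose_flag) jlog("read %d words from dictionary\n", nword); */
#endif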

#undef REPORT_MEMORY_USAGE

/*** N-gram tree construction ***/
/* With the 1-best approximation, constructing a single tree from all words
   causes considerable error from factoring.  Listing each word flatly with no
   tree organization avoids this error, but the network becomes much larger
   and, in particular, the inter-word LM handling becomes much more complex
   (O(n^2)).  The cost may be eased by LM caching, but that requires much
   memory. */
/* This is a trade-off between accuracy and cost */
#define SHORT_WORD_LEN 2
#ifdef LOWMEM
/* don't separate; construct a single tree from all words */
/* root nodes are about 50 in monophone, cache size will be 5MB at most */
#define NO_SEPARATE_SHORT_WORD
#else
#ifdef LOWMEM2
/* experimental: separate words that appear frequently in the corpus (by 1-gram) */
/* root nodes will be "-sepnum num" + 50, cache size will be 10MB or so */
#define NO_SEPARATE_SHORT_WORD
#define SEPARATE_BY_UNIGRAM
#else
/* separate all short words (<= 2 phonemes) */
/* root nodes are about 1100 in 20k (proportional to vocabulary),
   cache size will be about 100MB at most */
#endif /* LOWMEM2 */
#endif /* LOWMEM */
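
/* Illustrative sketch (not part of Julius): how the flags above change which
   words are taken out of the shared lexicon tree and listed separately.
   Function and argument names are hypothetical; only SHORT_WORD_LEN,
   NO_SEPARATE_SHORT_WORD and SEPARATE_BY_UNIGRAM come from this header. */
#if 0
static int
example_word_is_separated(int num_phonemes, int is_frequent_unigram)
{
#if defined(NO_SEPARATE_SHORT_WORD) && !defined(SEPARATE_BY_UNIGRAM)
  return 0;                     /* LOWMEM: keep every word inside the single tree */
#elif defined(SEPARATE_BY_UNIGRAM)
  return is_frequent_unigram;   /* LOWMEM2: separate the "-sepnum" most frequent words */
#else
  return num_phonemes <= SHORT_WORD_LEN;  /* default: separate all short words */
#endif
}
#endif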

/*#define HASH_CACHE_IW*/
/* "./configure --enable-lowmem" defines NO_SEPARATE_SHORT_WORD instead */

/* default language model weight and insertion penalty for pass1 and pass2 */
/* these values come from the best parameters in the IPA evaluation results */
#define DEFAULT_LM_WEIGHT_MONO_PASS1   5.0
#define DEFAULT_LM_PENALTY_MONO_PASS1 -1.0
#define DEFAULT_LM_WEIGHT_MONO_PASS2   6.0
#define DEFAULT_LM_PENALTY_MONO_PASS2  0.0
#ifdef PASS1_IWCD
#define DEFAULT_LM_WEIGHT_TRI_PASS1   8.0
#define DEFAULT_LM_PENALTY_TRI_PASS1 -2.0
#define DEFAULT_LM_WEIGHT_TRI_PASS2   8.0
#define DEFAULT_LM_PENALTY_TRI_PASS2 -2.0
#else
#define DEFAULT_LM_WEIGHT_TRI_PASS1   9.0
#define DEFAULT_LM_PENALTY_TRI_PASS1  8.0
#define DEFAULT_LM_WEIGHT_TRI_PASS2  11.0
#define DEFAULT_LM_PENALTY_TRI_PASS2 -2.0
#endif /* PASS1_IWCD */
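
/* Illustrative sketch (not part of Julius): how the defaults above are meant
   to be chosen, i.e. by acoustic model type (monophone vs. triphone) and by
   pass.  The structure and field names below are hypothetical. */
#if 0
  if (am_is_triphone) {                            /* hypothetical flag */
    conf->lm_weight   = DEFAULT_LM_WEIGHT_TRI_PASS1;
    conf->lm_penalty  = DEFAULT_LM_PENALTY_TRI_PASS1;
    conf->lm_weight2  = DEFAULT_LM_WEIGHT_TRI_PASS2;
    conf->lm_penalty2 = DEFAULT_LM_PENALTY_TRI_PASS2;
  } else {
    conf->lm_weight   = DEFAULT_LM_WEIGHT_MONO_PASS1;
    conf->lm_penalty  = DEFAULT_LM_PENALTY_MONO_PASS1;
    conf->lm_weight2  = DEFAULT_LM_WEIGHT_MONO_PASS2;
    conf->lm_penalty2 = DEFAULT_LM_PENALTY_MONO_PASS2;
  }
#endif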

/* switch for applying the word insertion penalty at head/tail words */
#undef FIX_PENALTY

/* some definitions for short-pause segmentation */
#undef SP_BREAK_EVAL            /* output messages for evaluation */
#undef SP_BREAK_DEBUG           /* output messages for debugging */
#undef SP_BREAK_RESUME_WORD_BEGIN /* resume word = maxword at beginning of sp area */

#ifdef GMM_VAD
#define DEFAULT_GMM_MARGIN 20   /* backstep margin; determines buffer length */
#define GMM_VAD_AUTOSHRINK_LIMIT 500
#undef GMM_VAD_DEBUG            /* output debug messages */
#endif
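
/* Illustrative sketch (not part of Julius): DEFAULT_GMM_MARGIN frames of
   input are kept so that, when the GMM-based VAD triggers, processing can
   back-step to a point shortly before the trigger.  A minimal ring buffer
   with hypothetical names is shown; error handling is omitted. */
#if 0
static float *margin_buf[DEFAULT_GMM_MARGIN];  /* last frames kept for back-step */
static int    margin_idx = 0;

static void
example_keep_frame(float *framevec)
{
  margin_buf[margin_idx] = framevec;                   /* overwrite the oldest entry */
  margin_idx = (margin_idx + 1) % DEFAULT_GMM_MARGIN;  /* advance circularly */
}
#endif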

/* default values for spseg_naist */
#ifdef SPSEGMENT_NAIST
#define DEFAULT_SP_MARGIN 40
#define DEFAULT_SP_DELAY 4
#define SPSEGMENT_NAIST_AUTOSHRINK_LIMIT 500
#endif

/* '01/10/18 by ri: enable fix for trellis lookup order */
#define PREFER_CENTER_ON_TRELLIS_LOOKUP

/* '01/11/28 by ri: malloc step for startnode for multipath mode */
#define STARTNODE_STEP 300
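
/* Illustrative sketch (not part of Julius): the "malloc step" pattern the
   comment above refers to, i.e. growing an array in chunks of STARTNODE_STEP
   entries instead of reallocating for every added element.  Names other than
   STARTNODE_STEP are hypothetical; error handling is omitted. */
#if 0
#include <stdlib.h>

static int *startnodes = NULL;   /* growing array of start node IDs */
static int  num_alloc  = 0;      /* currently allocated entries */
static int  num_used   = 0;      /* currently stored entries */

static void
example_add_startnode(int node)
{
  if (num_used >= num_alloc) {
    num_alloc += STARTNODE_STEP;                             /* grow by one step */
    startnodes = realloc(startnodes, sizeof(int) * num_alloc);
  }
  startnodes[num_used++] = node;
}
#endif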

/* default dict entry for the IW-sp word that will be added to the dictionary with -iwspword */
#define IWSPENTRY_DEFAULT "<UNK> [sp] sp sp"

/* confidence scoring method */
#ifdef CONFIDENCE_MEASURE
# ifndef CM_NBEST       /* use conventional N-best CM; defined when "--enable-cm-nbest" is specified */
#  define CM_SEARCH     /* otherwise, use on-the-fly CM scoring */
# endif
#endif

/* dynamic word graph generation */
#undef GRAPHOUT_SEARCH_CONSIDER_RIGHT /* if defined, merge only hypotheses whose
                                         left/right context is already included
                                         in the popped hypothesis.
                                         EXPERIMENTAL, should not be defined.
                                       */
#ifdef CM_SEARCH_LIMIT
#undef CM_SEARCH_LIMIT_AFTER    /* enable the above only after one sentence has been found */
#undef CM_SEARCH_LIMIT_POP      /* terminate hypotheses with low CM on pop */
#endif

/* compute exact boundaries instead of using the 1st pass result */
/* also propagate the exact time boundary to the right context after generation */
/* this may produce precise word boundaries, but causes larger word graph output */
#define GRAPHOUT_PRECISE_BOUNDARY

#undef GDEBUG                   /* enable debug messages in graphout.c */

/* some decoding fix candidates */
#undef FIX_35_PASS2_STRICT_SCORE /* fix hypothesis scores by enabling
                                    bt_discount_pescore() in standard mode
                                    with PASS2_STRICT_IWCD */
#define FIX_35_INHIBIT_SAME_WORD_EXPANSION /* prevent connecting the same trellis word in the 2nd pass */


/* below are new since 3.5.2 */

#define GRAPHOUT_OVERWRITE

/* with GRAPHOUT_OVERWRITE, use gscore_head instead of fscore_head */
#undef GRAPHOUT_OVERWRITE_GSCORE

#define GRAPHOUT_LIMIT_BOUNDARY_LOOP

#define GRAPHOUT_SEARCH_DELAY_TERMINATION

#define GRAPHOUT_DEPTHCUT

#define MINIMAL_BEAM_WIDTH 200

#undef USE_OLD_IWCD

#undef DETERMINE

#define FWD_NGRAM

#define MAX_SPEECH_ALLOC_STEP 320000


#define POWER_REJECT_DEFAULT_THRES 9.0

#endif /* __J_DEFINE_H__ */

