-rw-r--r--  utils/training/eval_correction_rate.cpp   |  6 ------
-rw-r--r--  utils/training/gen_deleted_ngram.cpp      |  9 +--------
-rw-r--r--  utils/training/gen_k_mixture_model.cpp    | 10 ++--------
-rw-r--r--  utils/training/gen_ngram.cpp              |  8 +-------
-rw-r--r--  utils/training/import_k_mixture_model.cpp |  6 ------
5 files changed, 4 insertions(+), 35 deletions(-)
diff --git a/utils/training/eval_correction_rate.cpp b/utils/training/eval_correction_rate.cpp
index ed37266..29667ba 100644
--- a/utils/training/eval_correction_rate.cpp
+++ b/utils/training/eval_correction_rate.cpp
@@ -147,10 +147,6 @@ int main(int argc, char * argv[]){
exit(ENOENT);
}
- PhraseTokens phrase_tokens;
- memset(phrase_tokens, 0, sizeof(PhraseTokens));
- phrase_index.prepare_tokens(phrase_tokens);
-
/* Evaluates the correction rate of test text documents. */
size_t tested_count = 0; size_t passed_count = 0;
char* linebuf = NULL; size_t size = 0;
@@ -196,7 +192,5 @@ int main(int argc, char * argv[]){
fclose(evals_file);
free(linebuf);
- phrase_index.destroy_tokens(phrase_tokens);
-
return 0;
}
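All five files below delete the same idiom. Reassembled here from the removed lines of eval_correction_rate.cpp above (the comments are an editorial reading of the calls, not libpinyin documentation, and assume the arrays were simply never used by these tools):

PhraseTokens phrase_tokens;                      /* one token array per phrase library; PhraseTokens  */
memset(phrase_tokens, 0, sizeof(PhraseTokens));  /* is evidently an array type, since memset takes    */
                                                 /* the variable directly rather than its address     */
phrase_index.prepare_tokens(phrase_tokens);      /* allocate the per-library arrays                   */

/* ... a phrase-table lookup would fill these arrays, but nothing in the        */
/* surrounding context of these utilities references them, so the pair reads    */
/* as dead setup/teardown code ...                                              */

phrase_index.destroy_tokens(phrase_tokens);      /* matching release before returning from main()     */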
diff --git a/utils/training/gen_deleted_ngram.cpp b/utils/training/gen_deleted_ngram.cpp
index 55679ed..157266a 100644
--- a/utils/training/gen_deleted_ngram.cpp
+++ b/utils/training/gen_deleted_ngram.cpp
@@ -70,12 +70,7 @@ int main(int argc, char * argv[]){
Bigram bigram;
bigram.attach(bigram_filename, ATTACH_CREATE|ATTACH_READWRITE);
- PhraseTokens tokens;
- memset(tokens, 0, sizeof(PhraseTokens));
- phrase_index.prepare_tokens(tokens);
-
- char* linebuf = NULL;
- size_t size = 0;
+ char* linebuf = NULL; size_t size = 0;
phrase_token_t last_token, cur_token = last_token = 0;
while( getline(&linebuf, &size, stdin) ){
if ( feof(stdin) )
@@ -122,8 +117,6 @@ int main(int argc, char * argv[]){
delete single_gram;
}
- phrase_index.destroy_tokens(tokens);
-
free(linebuf);
return 0;
}
diff --git a/utils/training/gen_k_mixture_model.cpp b/utils/training/gen_k_mixture_model.cpp
index eae75c4..9bea6ab 100644
--- a/utils/training/gen_k_mixture_model.cpp
+++ b/utils/training/gen_k_mixture_model.cpp
@@ -51,12 +51,8 @@ bool read_document(PhraseLargeTable2 * phrase_table,
FILE * document,
HashofDocument hash_of_document,
HashofUnigram hash_of_unigram){
- PhraseTokens tokens;
- memset(tokens, 0, sizeof(PhraseTokens));
- phrase_index->prepare_tokens(tokens);
- char * linebuf = NULL;
- size_t size = 0;
+ char * linebuf = NULL; size_t size = 0;
phrase_token_t last_token, cur_token = last_token = 0;
while ( getline(&linebuf, &size, document) ){
@@ -67,7 +63,7 @@ bool read_document(PhraseLargeTable2 * phrase_table,
linebuf[strlen(linebuf) - 1] = '\0';
}
- TAGLIB_PARSE_SEGMENTED_LINE(&phrase_index, token, linebuf);
+ TAGLIB_PARSE_SEGMENTED_LINE(phrase_index, token, linebuf);
last_token = cur_token;
cur_token = token;
@@ -128,8 +124,6 @@ bool read_document(PhraseLargeTable2 * phrase_table,
free(linebuf);
- phrase_index->destroy_tokens(tokens);
-
return true;
}
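Besides dropping the token setup, this file's diff also removes a stray '&': inside read_document() the parameter is already a pointer (see the phrase_index->prepare_tokens(...) call being deleted above), so &phrase_index presumably handed TAGLIB_PARSE_SEGMENTED_LINE a FacadePhraseIndex ** where a FacadePhraseIndex * is expected. A minimal standalone sketch of that mix-up, using a stand-in type rather than libpinyin's real classes:

#include <cstdio>

struct Index { int value = 42; };                 /* stand-in for FacadePhraseIndex */

static void read_document_like(Index * phrase_index) {
    Index *  right = phrase_index;                /* what a FacadePhraseIndex * consumer expects */
    Index ** wrong = &phrase_index;               /* what '&phrase_index' yields in this scope   */
    printf("right->value = %d, wrong is %p\n", right->value, (void *) wrong);
}

int main() {
    Index index;
    read_document_like(&index);                   /* in main(), where 'index' is an object, '&' is correct */
    return 0;
}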
diff --git a/utils/training/gen_ngram.cpp b/utils/training/gen_ngram.cpp
index cc68d9e..603277c 100644
--- a/utils/training/gen_ngram.cpp
+++ b/utils/training/gen_ngram.cpp
@@ -70,12 +70,7 @@ int main(int argc, char * argv[]){
Bigram bigram;
bigram.attach(bigram_filename, ATTACH_CREATE|ATTACH_READWRITE);
- PhraseTokens tokens;
- memset(tokens, 0, sizeof(PhraseTokens));
- phrase_index.prepare_tokens(tokens);
-
- char* linebuf = NULL;
- size_t size = 0;
+ char* linebuf = NULL; size_t size = 0;
phrase_token_t last_token, cur_token = last_token = 0;
while( getline(&linebuf, &size, stdin) ){
if ( feof(stdin) )
@@ -125,7 +120,6 @@ int main(int argc, char * argv[]){
delete single_gram;
}
- phrase_index.destroy_tokens(tokens);
free(linebuf);
if (!save_phrase_index(&phrase_index))
diff --git a/utils/training/import_k_mixture_model.cpp b/utils/training/import_k_mixture_model.cpp
index cdd04e0..1e62889 100644
--- a/utils/training/import_k_mixture_model.cpp
+++ b/utils/training/import_k_mixture_model.cpp
@@ -283,10 +283,6 @@ int main(int argc, char * argv[]){
KMixtureModelBigram bigram(K_MIXTURE_MODEL_MAGIC_NUMBER);
bigram.attach(k_mixture_model_filename, ATTACH_READWRITE|ATTACH_CREATE);
- PhraseTokens tokens;
- memset(tokens, 0, sizeof(PhraseTokens));
- phrase_index.prepare_tokens(tokens);
-
taglib_init();
/* prepare to read n-gram model */
@@ -308,7 +304,5 @@ int main(int argc, char * argv[]){
taglib_fini();
- phrase_index.destroy_tokens(tokens);
-
return 0;
}