Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/moses-smt/mosesdecoder.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/moses
diff options
context:
space:
mode:
authorEva Hasler <ehasler@saxnot.inf.ed.ac.uk>2012-04-29 08:37:48 +0400
committerEva Hasler <ehasler@saxnot.inf.ed.ac.uk>2012-04-29 08:37:48 +0400
commitef552fe91a99fd4838d083f6d0fcecbe429db2be (patch)
tree58f01754b7ee03a3103818a5ab05e96fa7c797c4 /moses
parentf09c962f76d874216c2829546134a7fdbdae59f5 (diff)
remove caching of wp weight and translation weights, clean up mira code
Diffstat (limited to 'moses')
-rw-r--r--moses/src/BleuScoreFeature.cpp35
-rw-r--r--moses/src/BleuScoreFeature.h7
-rw-r--r--moses/src/ChartRuleLookupManagerOnDisk.cpp1
-rw-r--r--moses/src/FeatureVector.cpp15
-rw-r--r--moses/src/FeatureVector.h1
-rw-r--r--moses/src/PDTAimp.h4
-rw-r--r--moses/src/PhraseDictionary.cpp1
-rw-r--r--moses/src/PhraseDictionaryALSuffixArray.cpp1
-rw-r--r--moses/src/PhraseDictionaryOnDisk.cpp1
-rw-r--r--moses/src/ScoreComponentCollection.h8
-rw-r--r--moses/src/ScoreProducer.h4
-rw-r--r--moses/src/SearchCubePruning.cpp2
-rw-r--r--moses/src/StaticData.cpp54
-rw-r--r--moses/src/StaticData.h12
-rw-r--r--moses/src/TargetPhrase.cpp1
-rw-r--r--moses/src/TranslationSystem.cpp10
16 files changed, 116 insertions, 41 deletions
diff --git a/moses/src/BleuScoreFeature.cpp b/moses/src/BleuScoreFeature.cpp
index db2a4f568..223f5fa0c 100644
--- a/moses/src/BleuScoreFeature.cpp
+++ b/moses/src/BleuScoreFeature.cpp
@@ -92,8 +92,7 @@ void BleuScoreFeature::PrintHistory(std::ostream& out) const {
void BleuScoreFeature::SetBleuParameters(bool sentenceBleu, bool scaleByInputLength, bool scaleByAvgInputLength,
bool scaleByInverseLength, bool scaleByAvgInverseLength,
- float scaleByX, float historySmoothing, size_t scheme, float relaxBP,
- bool useSourceLengthHistory) {
+ float scaleByX, float historySmoothing, size_t scheme) {
m_sentence_bleu = sentenceBleu;
m_scale_by_input_length = scaleByInputLength;
m_scale_by_avg_input_length = scaleByAvgInputLength;
@@ -102,8 +101,6 @@ void BleuScoreFeature::SetBleuParameters(bool sentenceBleu, bool scaleByInputLen
m_scale_by_x = scaleByX;
m_historySmoothing = historySmoothing;
m_smoothing_scheme = (SmoothingScheme)scheme;
- m_relax_BP = relaxBP;
- m_useSourceLengthHistory = useSourceLengthHistory;
}
// Incoming references (refs) are stored as refs[file_id][[sent_id][reference]]
@@ -633,7 +630,7 @@ float BleuScoreFeature::CalculateBleu(BleuScoreState* state) const {
smoothed_count += 1;
}
break;
- case LIGHT:
+ case PLUS_POINT_ONE:
if (i > 0) {
// smoothing for all n > 1
smoothed_matches += 0.1;
@@ -662,9 +659,9 @@ float BleuScoreFeature::CalculateBleu(BleuScoreState* state) const {
// where
// c: length of the candidate translation
// r: effective reference length (sum of best match lengths for each candidate sentence)
- if (state->m_target_length < (state->m_scaled_ref_length * m_relax_BP)) {
+ if (state->m_target_length < state->m_scaled_ref_length) {
float smoothed_target_length = m_target_length_history + state->m_target_length;
- float smoothed_ref_length = m_ref_length_history + (state->m_scaled_ref_length * m_relax_BP);
+ float smoothed_ref_length = m_ref_length_history + state->m_scaled_ref_length;
precision *= exp(1 - (smoothed_ref_length/ smoothed_target_length));
}
@@ -696,8 +693,8 @@ float BleuScoreFeature::CalculateBleu(BleuScoreState* state) const {
// = BP * 4th root(PRODUCT_1_4 p_n)
for (size_t i = 0; i < BleuScoreState::bleu_order; i++) {
if (state->m_ngram_counts[i]) {
- smoothed_matches = m_match_history[i] + state->m_ngram_matches[i];
- smoothed_count = m_count_history[i] + state->m_ngram_counts[i];
+ smoothed_matches = m_match_history[i] + state->m_ngram_matches[i] + 0.1;
+ smoothed_count = m_count_history[i] + state->m_ngram_counts[i] + 0.1;
precision *= smoothed_matches / smoothed_count;
}
}
@@ -705,18 +702,18 @@ float BleuScoreFeature::CalculateBleu(BleuScoreState* state) const {
// take geometric mean
precision = pow(precision, (float)1/4);
- // BP
+ // Apply brevity penalty if applicable.
if (m_target_length_history + state->m_target_length < m_ref_length_history + state->m_scaled_ref_length)
precision *= exp(1 - (m_ref_length_history + state->m_scaled_ref_length/m_target_length_history + state->m_target_length));
-// cerr << "precision: " << precision << endl;
+ //cerr << "\nprecision: " << precision << endl;
// **BLEU score of pseudo-document**
float precision_pd = 1.0;
if (m_target_length_history > 0) {
for (size_t i = 0; i < BleuScoreState::bleu_order; i++)
if (m_count_history[i] != 0)
- precision_pd *= m_match_history[i]/m_count_history[i];
+ precision_pd *= (m_match_history[i] + 0.1)/(m_count_history[i] + 0.1);
// take geometric mean
precision_pd = pow(precision_pd, (float)1/4);
@@ -729,18 +726,16 @@ float BleuScoreFeature::CalculateBleu(BleuScoreState* state) const {
precision_pd = 0;
// **end BLEU of pseudo-document**
-// cerr << "precision pd: " << precision_pd << endl;
+ //cerr << "precision pd: " << precision_pd << endl;
float sentence_impact;
- if (m_target_length_history > 0) {
- if (m_source_length_history)
- sentence_impact = m_source_length_history * (precision - precision_pd);
- else
- sentence_impact = m_target_length_history * (precision - precision_pd);
- }
+ if (m_target_length_history > 0)
+ sentence_impact = m_target_length_history * (precision - precision_pd);
else
- sentence_impact = precision;
+ sentence_impact = precision;
+ sentence_impact *= 10;
+ //cerr << "sentence impact: " << sentence_impact << endl;
return sentence_impact;
}
}
diff --git a/moses/src/BleuScoreFeature.h b/moses/src/BleuScoreFeature.h
index 988db6a27..1c0e1825c 100644
--- a/moses/src/BleuScoreFeature.h
+++ b/moses/src/BleuScoreFeature.h
@@ -66,7 +66,7 @@ public:
m_scale_by_x(1),
m_historySmoothing(0.9),
m_useSourceLengthHistory(0),
- m_smoothing_scheme(PLUS_ONE),
+ m_smoothing_scheme(PLUS_POINT_ONE),
m_relax_BP(1) {}
std::string GetScoreProducerDescription() const
@@ -93,8 +93,7 @@ public:
void PrintRefLength(const std::vector<size_t>& ref_ids);
void SetBleuParameters(bool sentenceBleu, bool scaleByInputLength, bool scaleByAvgInputLength,
bool scaleByInverseLength, bool scaleByAvgInverseLength,
- float scaleByX, float historySmoothing, size_t scheme, float relaxBP,
- bool useSourceLengthHistory);
+ float scaleByX, float historySmoothing, size_t scheme);
void GetNgramMatchCounts(Phrase&,
const NGrams&,
@@ -163,7 +162,7 @@ private:
float m_historySmoothing;
bool m_useSourceLengthHistory;
- enum SmoothingScheme { PLUS_ONE = 1, LIGHT = 2, PAPINENI = 3 };
+ enum SmoothingScheme { PLUS_ONE = 1, PLUS_POINT_ONE = 2, PAPINENI = 3 };
SmoothingScheme m_smoothing_scheme;
// relax application of the BP by setting a value between 0 and 1
diff --git a/moses/src/ChartRuleLookupManagerOnDisk.cpp b/moses/src/ChartRuleLookupManagerOnDisk.cpp
index 77e699a08..17e57f73c 100644
--- a/moses/src/ChartRuleLookupManagerOnDisk.cpp
+++ b/moses/src/ChartRuleLookupManagerOnDisk.cpp
@@ -242,7 +242,6 @@ void ChartRuleLookupManagerOnDisk::GetChartRuleCollection(
const OnDiskPt::TargetPhraseCollection *tpcollBerkeleyDb = node->GetTargetPhraseCollection(m_dictionary.GetTableLimit(), m_dbWrapper);
std::vector<float> weightT = staticData.GetTranslationSystem(TranslationSystem::DEFAULT).GetTranslationWeights();
- cerr << "Read weightT from translation sytem.. " << std::endl;
targetPhraseCollection
= tpcollBerkeleyDb->ConvertToMoses(m_inputFactorsVec
,m_outputFactorsVec
diff --git a/moses/src/FeatureVector.cpp b/moses/src/FeatureVector.cpp
index 81099ab6c..f2c17813a 100644
--- a/moses/src/FeatureVector.cpp
+++ b/moses/src/FeatureVector.cpp
@@ -323,6 +323,17 @@ namespace Moses {
m_coreFeatures[i] = logOfValue;
}
}
+
+ void FVector::printCoreFeatures() {
+ cerr << "core=(";
+ for (size_t i = 0; i < m_coreFeatures.size(); ++i) {
+ cerr << m_coreFeatures[i];
+ if (i + 1 < m_coreFeatures.size()) {
+ cerr << ",";
+ }
+ }
+ cerr << ") ";
+ }
FVector& FVector::operator+= (const FVector& rhs) {
if (rhs.m_coreFeatures.size() > m_coreFeatures.size())
@@ -519,9 +530,9 @@ namespace Moses {
FValue FVector::l1norm_coreFeatures() const {
FValue norm = 0;
- for (size_t i = 0; i < m_coreFeatures.size(); ++i) {
+ // ignore Bleu score feature (last feature)
+ for (size_t i = 0; i < m_coreFeatures.size()-1; ++i)
norm += abs(m_coreFeatures[i]);
- }
return norm;
}
diff --git a/moses/src/FeatureVector.h b/moses/src/FeatureVector.h
index 96d1db29f..ec693f220 100644
--- a/moses/src/FeatureVector.h
+++ b/moses/src/FeatureVector.h
@@ -197,6 +197,7 @@ namespace Moses {
/** additional */
void logCoreFeatures(size_t baseOfLog);
+ void printCoreFeatures();
//scale so that abs. value is less than maxvalue
void thresholdScale(float maxValue );
diff --git a/moses/src/PDTAimp.h b/moses/src/PDTAimp.h
index 34a5d73b7..7bff39c07 100644
--- a/moses/src/PDTAimp.h
+++ b/moses/src/PDTAimp.h
@@ -447,14 +447,12 @@ public:
//tally up
std::vector<float> weightT = system.GetTranslationWeights();
- std::cerr << "Read weightT from translation sytem.. " << std::endl;
//float score=std::inner_product(nscores.begin(), nscores.end(), m_weights.begin(), 0.0f);
float score=std::inner_product(nscores.begin(), nscores.end(), weightT.begin(), 0.0f);
//count word penalty
float weightWP = system.GetWeightWordPenalty();
- std::cerr << "Read weightWP from translation sytem: " << weightWP << std::endl;
- //score-=tcands[i].tokens.size() * m_weightWP;
+ //score-=tcands[i].tokens.size() * m_weightWP;
score-=tcands[i].tokens.size() * weightWP;
std::pair<E2Costs::iterator,bool> p=e2costs.insert(std::make_pair(tcands[i].tokens,TScores()));
diff --git a/moses/src/PhraseDictionary.cpp b/moses/src/PhraseDictionary.cpp
index 20eef291f..0fc39a286 100644
--- a/moses/src/PhraseDictionary.cpp
+++ b/moses/src/PhraseDictionary.cpp
@@ -79,7 +79,6 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
{
const StaticData& staticData = StaticData::Instance();
std::vector<float> weightT = system->GetTranslationWeights();
- cerr << "Read weightT from translation sytem.. " << std::endl;
if (m_implementation == Memory) {
// memory phrase table
diff --git a/moses/src/PhraseDictionaryALSuffixArray.cpp b/moses/src/PhraseDictionaryALSuffixArray.cpp
index f12f2939e..71b810a91 100644
--- a/moses/src/PhraseDictionaryALSuffixArray.cpp
+++ b/moses/src/PhraseDictionaryALSuffixArray.cpp
@@ -57,7 +57,6 @@ void PhraseDictionaryALSuffixArray::InitializeForInput(InputType const& source)
std::auto_ptr<RuleTableLoader> loader =
RuleTableLoaderFactory::Create(grammarFile);
std::vector<float> weightT = StaticData::Instance().GetTranslationSystem(TranslationSystem::DEFAULT).GetTranslationWeights();
- cerr << "Read weightT from translation sytem.. " << std::endl;
//bool ret = loader->Load(*m_input, *m_output, inFile, *m_weight, m_tableLimit,
bool ret = loader->Load(*m_input, *m_output, inFile, weightT, m_tableLimit,
*m_languageModels, m_wpProducer, *this);
diff --git a/moses/src/PhraseDictionaryOnDisk.cpp b/moses/src/PhraseDictionaryOnDisk.cpp
index 50ce05d57..3c89408f5 100644
--- a/moses/src/PhraseDictionaryOnDisk.cpp
+++ b/moses/src/PhraseDictionaryOnDisk.cpp
@@ -95,7 +95,6 @@ ChartRuleLookupManager *PhraseDictionaryOnDisk::CreateRuleLookupManager(
const ChartCellCollection &cellCollection)
{
std::vector<float> weightT = StaticData::Instance().GetTranslationSystem(TranslationSystem::DEFAULT).GetTranslationWeights();
- cerr << "Read weightT from translation sytem.. " << std::endl;
return new ChartRuleLookupManagerOnDisk(sentence, cellCollection, *this,
m_dbWrapper, m_languageModels,
m_wpProducer, m_inputFactorsVec,
diff --git a/moses/src/ScoreComponentCollection.h b/moses/src/ScoreComponentCollection.h
index dd6a5c855..8c57c82ad 100644
--- a/moses/src/ScoreComponentCollection.h
+++ b/moses/src/ScoreComponentCollection.h
@@ -78,6 +78,10 @@ private:
}
public:
+ static void ResetCounter() {
+ s_denseVectorSize = 0;
+ }
+
//! Create a new score collection with all values set to 0.0
ScoreComponentCollection();
@@ -314,6 +318,10 @@ public:
void LogCoreFeatures(size_t baseOfLog) {
m_scores.logCoreFeatures(baseOfLog);
}
+
+ void PrintCoreFeatures() {
+ m_scores.printCoreFeatures();
+ }
void ThresholdScaling(float maxValue)
{
diff --git a/moses/src/ScoreProducer.h b/moses/src/ScoreProducer.h
index 65b655972..c88e5efca 100644
--- a/moses/src/ScoreProducer.h
+++ b/moses/src/ScoreProducer.h
@@ -34,6 +34,10 @@ public:
static const size_t unlimited;
+ static void ResetDescriptionCounts() {
+ description_counts.clear();
+ }
+
//! returns the number of scores that a subclass produces.
//! For example, a language model conventionally produces 1, a translation table some arbitrary number, etc
//! sparse features returned unlimited
diff --git a/moses/src/SearchCubePruning.cpp b/moses/src/SearchCubePruning.cpp
index 4f98208bb..9fe6aec24 100644
--- a/moses/src/SearchCubePruning.cpp
+++ b/moses/src/SearchCubePruning.cpp
@@ -145,7 +145,7 @@ void SearchCubePruning::ProcessSentence()
stackNo++;
}
- PrintBitmapContainerGraph();
+ //PrintBitmapContainerGraph();
// some more logging
IFVERBOSE(2) {
diff --git a/moses/src/StaticData.cpp b/moses/src/StaticData.cpp
index 5ab5f9927..9c02e48e6 100644
--- a/moses/src/StaticData.cpp
+++ b/moses/src/StaticData.cpp
@@ -116,6 +116,49 @@ StaticData::StaticData()
Phrase::InitializeMemPool();
}
+void StaticData::ClearData() {
+ for (size_t i=0; i < m_decodeGraphs.size(); ++i)
+ delete m_decodeGraphs[i];
+ m_decodeGraphs.clear();
+ m_decodeGraphBackoff.clear();
+
+ m_translationSystems.clear();
+ for (size_t i=0; i < m_wordPenaltyProducers.size(); ++i) {
+ ScoreComponentCollection::UnregisterScoreProducer(m_wordPenaltyProducers[i]);
+ delete m_wordPenaltyProducers[i];
+ }
+ m_wordPenaltyProducers.clear();
+ for (size_t i=0; i < m_distortionScoreProducers.size(); ++i) {
+ ScoreComponentCollection::UnregisterScoreProducer(m_distortionScoreProducers[i]);
+ delete m_distortionScoreProducers[i];
+ }
+ m_distortionScoreProducers.clear();
+ for (size_t i=0; i < m_phraseDictionary.size(); ++i) {
+ ScoreComponentCollection::UnregisterScoreProducer(m_phraseDictionary[i]);
+ delete m_phraseDictionary[i];
+ }
+ m_phraseDictionary.clear();
+ for (size_t i=0; i < m_reorderModels.size(); ++i) {
+ ScoreComponentCollection::UnregisterScoreProducer(m_reorderModels[i]);
+ delete m_reorderModels[i];
+ }
+ m_reorderModels.clear();
+ for (LMList::const_iterator k = m_languageModel.begin(); k != m_languageModel.end(); ++k) {
+ ScoreComponentCollection::UnregisterScoreProducer(*k);
+ // delete *k;
+ }
+ m_languageModel.CleanUp();
+
+ ScoreComponentCollection::UnregisterScoreProducer(m_bleuScoreFeature);
+ ScoreComponentCollection::UnregisterScoreProducer(m_unknownWordPenaltyProducer);
+
+ m_inputFactorOrder.clear();
+ m_outputFactorOrder.clear();
+
+ ScoreComponentCollection::ResetCounter();
+ ScoreProducer::ResetDescriptionCounts();
+}
+
bool StaticData::LoadData(Parameter *parameter)
{
ResetUserTime();
@@ -282,6 +325,8 @@ bool StaticData::LoadData(Parameter *parameter)
m_useTransOptCache = false;
}
+ std::cerr << "transOptCache: " << m_useTransOptCache << std::endl;
+ std::cerr << "transOptCache max size: " << m_transOptCacheMaxSize << std::endl;
//input factors
const vector<string> &inputFactorVector = m_parameter->GetParam("input-factors");
@@ -343,6 +388,7 @@ bool StaticData::LoadData(Parameter *parameter)
// settings for pruning
m_maxHypoStackSize = (m_parameter->GetParam("stack").size() > 0)
? Scan<size_t>(m_parameter->GetParam("stack")[0]) : DEFAULT_MAX_HYPOSTACK_SIZE;
+ std::cerr << "max stack size: " << m_maxHypoStackSize << std::endl;
m_minHypoStackDiversity = 0;
if (m_parameter->GetParam("stack-diversity").size() > 0) {
if (m_maxDistortion > 15) {
@@ -366,6 +412,10 @@ bool StaticData::LoadData(Parameter *parameter)
TransformScore(Scan<float>(m_parameter->GetParam("translation-option-threshold")[0]))
: TransformScore(DEFAULT_TRANSLATION_OPTION_THRESHOLD);
+ std::cerr << "beamwidth: " << m_beamWidth << std::endl;
+ std::cerr << "early discarding threshold: " << m_earlyDiscardingThreshold << std::endl;
+ std::cerr << "translOptThreshold: " << m_translationOptionThreshold << std::endl;
+
m_maxNoTransOptPerCoverage = (m_parameter->GetParam("max-trans-opt-per-coverage").size() > 0)
? Scan<size_t>(m_parameter->GetParam("max-trans-opt-per-coverage")[0]) : DEFAULT_MAX_TRANS_OPT_SIZE;
@@ -1697,7 +1747,7 @@ bool StaticData::LoadPhrasePairFeature()
size_t sourceFactorId = Scan<size_t>(factors[0]);
size_t targetFactorId = Scan<size_t>(factors[1]);
- bool simple = true, sourceContext = false, ignorePunctuation = true;
+ bool simple = true, sourceContext = false, ignorePunctuation = false;
if (tokens.size() >= 3) {
simple = Scan<size_t>(tokens[1]);
sourceContext = Scan<size_t>(tokens[2]);
@@ -1837,7 +1887,7 @@ bool StaticData::LoadWordTranslationFeature()
FactorType factorIdSource = Scan<size_t>(factors[0]);
FactorType factorIdTarget = Scan<size_t>(factors[1]);
- bool simple = true, sourceTrigger = false, targetTrigger = false, ignorePunctuation = true;
+ bool simple = true, sourceTrigger = false, targetTrigger = false, ignorePunctuation = false;
if (tokens.size() >= 4) {
simple = Scan<size_t>(tokens[1]);
sourceTrigger = Scan<size_t>(tokens[2]);
diff --git a/moses/src/StaticData.h b/moses/src/StaticData.h
index 6039fd543..a0899a4ec 100644
--- a/moses/src/StaticData.h
+++ b/moses/src/StaticData.h
@@ -302,11 +302,19 @@ public:
//! Load data into static instance. This function is required as LoadData() is not const
static bool LoadDataStatic(Parameter *parameter) {
+ std::cerr << "Load static data.." << std::endl;
return s_instance.LoadData(parameter);
+ std::cerr << "done.." << std::endl;
+ }
+ static void ClearDataStatic() {
+ std::cerr << "Clear static data.." << std::endl;
+ s_instance.ClearData();
+ std::cerr << "done.." << std::endl;
}
//! Main function to load everything. Also initialize the Parameter object
bool LoadData(Parameter *parameter);
+ void ClearData();
const PARAM_VEC &GetParam(const std::string &paramName) const {
return m_parameter->GetParam(paramName);
@@ -477,6 +485,10 @@ public:
LMList GetLMList() const {
return m_languageModel;
}
+ WordPenaltyProducer* GetWordPenaltyProducer() const {
+ assert(m_wordPenaltyProducers.size() >= 1);
+ return m_wordPenaltyProducers[0];
+ }
size_t GetNumInputScores() const {
return m_numInputScores;
}
diff --git a/moses/src/TargetPhrase.cpp b/moses/src/TargetPhrase.cpp
index 20821f1c5..c79b844f8 100644
--- a/moses/src/TargetPhrase.cpp
+++ b/moses/src/TargetPhrase.cpp
@@ -145,7 +145,6 @@ void TargetPhrase::SetScore(const ScoreProducer* translationScoreProducer,
const TranslationSystem& system = StaticData::Instance().GetTranslationSystem(TranslationSystem::DEFAULT);
std::vector<float> weightsT = system.GetTranslationWeights();
weightWP = system.GetWeightWordPenalty();
- VERBOSE(1, cerr << "weightWP: " << weightWP << std::endl);
//m_transScore = std::inner_product(scoreVector.begin(), scoreVector.end(), weightT.begin(), 0.0f);
m_transScore = std::inner_product(scoreVector.begin(), scoreVector.end(), weightsT.begin(), 0.0f);
diff --git a/moses/src/TranslationSystem.cpp b/moses/src/TranslationSystem.cpp
index 2b8bd887c..2086949fd 100644
--- a/moses/src/TranslationSystem.cpp
+++ b/moses/src/TranslationSystem.cpp
@@ -161,7 +161,9 @@ namespace Moses {
}
float TranslationSystem::GetWeightWordPenalty() const {
- return StaticData::Instance().GetWeight(m_wpProducer);
+ float weightWP = StaticData::Instance().GetWeight(m_wpProducer);
+ VERBOSE(1, "Read weightWP from translation sytem: " << weightWP << std::endl);
+ return weightWP;
}
float TranslationSystem::GetWeightUnknownWordPenalty() const {
@@ -175,10 +177,10 @@ namespace Moses {
std::vector<float> TranslationSystem::GetTranslationWeights() const {
std::vector<float> weights = StaticData::Instance().GetWeights(GetTranslationScoreProducer());
- VERBOSE(1, cerr << "Read weightT from translation sytem.. ");
+ VERBOSE(1, "Read weightT from translation sytem.. ");
for (size_t i = 0; i < weights.size(); ++i)
- VERBOSE(1, std::cerr << weights[i] << " ");
- VERBOSE(1, std::cerr << std::endl);
+ VERBOSE(1, weights[i] << " ");
+ VERBOSE(1, std::endl);
return weights;
}
};