github.com/marian-nmt/marian.git
author     Roman Grundkiewicz <rgrundki@exseed.ed.ac.uk>  2017-10-28 20:02:26 +0300
committer  Roman Grundkiewicz <rgrundki@exseed.ed.ac.uk>  2017-10-28 20:02:26 +0300
commit     c7e99fc4a503c29a8709269611cefdb9f86ff7c7 (patch)
tree       f69a3215c00762487e6a58331168dc91c6e25515
parent     7f9cfa454891cd61e4a8a81fa9df8e4f370aea52 (diff)
Autoformat
-rw-r--r--  src/common/logging.h               2
-rw-r--r--  src/data/corpus.cpp               10
-rw-r--r--  src/data/dataset.h                 4
-rw-r--r--  src/data/types.h                  10
-rw-r--r--  src/graph/node_operators_binary.h 26
-rw-r--r--  src/graph/node_operators_unary.h   7
-rw-r--r--  src/kernels/sparse.cu             14
-rw-r--r--  src/kernels/sparse.h               7
-rw-r--r--  src/kernels/tensor_operators.cu   12
-rw-r--r--  src/kernels/tensor_operators.h    21
-rw-r--r--  src/kernels/thrust_functions.h     3
-rw-r--r--  src/models/transformer.h           9
-rw-r--r--  src/rnn/types.h                    2
-rw-r--r--  src/training/dropper.h            21
-rw-r--r--  src/training/validator.cpp         3
-rw-r--r--  src/training/validator.h           3
16 files changed, 103 insertions, 51 deletions
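
The hunks below apply one consistent wrapping rule: a declaration or call that no longer fits the column limit gets one argument per line, aligned with the opening parenthesis. A minimal C++ sketch of the pattern, assuming a clang-format style with BinPackParameters/BinPackArguments disabled (the exact tool and settings are not stated in the commit; the signature below is illustrative, not taken from the repository):

// Before: parameters bin-packed onto a single continuation line.
void multiply(
    float* c, const float* a, const float* b, bool transA, bool transB);

// After: one parameter per line, aligned with the opening parenthesis.
void multiply(float* c,
              const float* a,
              const float* b,
              bool transA,
              bool transB);
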
diff --git a/src/common/logging.h b/src/common/logging.h
index 2f053b40..1e1d8518 100644
--- a/src/common/logging.h
+++ b/src/common/logging.h
@@ -48,7 +48,7 @@
#define ABORT_IF(condition, ...) \
do { \
if(condition) { \
- ABORT(__VA_ARGS__); \
+ ABORT(__VA_ARGS__); \
} \
} while(0)
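
For context, the ABORT_IF macro reindented above wraps a condition check around ABORT, which logs a formatted message and terminates. A hypothetical usage sketch (the function and message are illustrative, not from the repository; the "{}" placeholders follow the same spdlog-style formatting as the LOG calls later in this diff):

#include "common/logging.h"

void selectDevice(size_t deviceId, size_t deviceCount) {
  // ABORT_IF evaluates the condition and, if true, forwards the remaining
  // arguments to ABORT's "{}"-style message formatting.
  ABORT_IF(deviceId >= deviceCount,
           "Requested device {} but only {} devices are available",
           deviceId,
           deviceCount);
}
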
diff --git a/src/data/corpus.cpp b/src/data/corpus.cpp
index 850d5754..b5df6aef 100644
--- a/src/data/corpus.cpp
+++ b/src/data/corpus.cpp
@@ -61,7 +61,10 @@ Corpus::Corpus(Ptr<Config> options, bool translate)
for(size_t i = 0; i < paths_.size(); ++i) {
Ptr<Vocab> vocab = New<Vocab>();
int vocSize = vocab->loadOrCreate("", paths_[i], maxVocabs[i]);
- LOG(info, "[data] Setting vocabulary size for input {} to {}", i, vocSize);
+ LOG(info,
+ "[data] Setting vocabulary size for input {} to {}",
+ i,
+ vocSize);
options_->get()["dim-vocabs"][i] = vocSize;
options_->get()["vocabs"].push_back(paths_[i] + ".yml");
@@ -76,7 +79,10 @@ Corpus::Corpus(Ptr<Config> options, bool translate)
Ptr<Vocab> vocab = New<Vocab>();
int vocSize
= vocab->loadOrCreate(vocabPaths[i], paths_[i], maxVocabs[i]);
- LOG(info, "[data] Setting vocabulary size for input {} to {}", i, vocSize);
+ LOG(info,
+ "[data] Setting vocabulary size for input {} to {}",
+ i,
+ vocSize);
options_->get()["dim-vocabs"][i] = vocSize;
vocabs_.emplace_back(vocab);
diff --git a/src/data/dataset.h b/src/data/dataset.h
index 76edae9b..d019b3b3 100644
--- a/src/data/dataset.h
+++ b/src/data/dataset.h
@@ -77,9 +77,7 @@ public:
void push_back(Input input) { inputs_.push_back(input); }
- virtual std::vector<Ptr<Batch>> split(size_t n) {
- ABORT("Not implemented");
- }
+ virtual std::vector<Ptr<Batch>> split(size_t n) { ABORT("Not implemented"); }
Data& features() { return inputs_[0].data(); }
diff --git a/src/data/types.h b/src/data/types.h
index f3224999..3e36c454 100644
--- a/src/data/types.h
+++ b/src/data/types.h
@@ -27,10 +27,16 @@ const std::string DEL_STR = "<d>";
const std::string RPL_STR = "<r>";
const std::unordered_map<std::string, Word> SPEC2SYM = {
- {STP_STR, STP_ID}, {CPY_STR, CPY_ID}, {DEL_STR, DEL_ID}, {RPL_STR, RPL_ID},
+ {STP_STR, STP_ID},
+ {CPY_STR, CPY_ID},
+ {DEL_STR, DEL_ID},
+ {RPL_STR, RPL_ID},
};
const std::unordered_map<Word, std::string> SYM2SPEC = {
- {STP_ID, STP_STR}, {CPY_ID, CPY_STR}, {DEL_ID, DEL_STR}, {RPL_ID, RPL_STR},
+ {STP_ID, STP_STR},
+ {CPY_ID, CPY_STR},
+ {DEL_ID, DEL_STR},
+ {RPL_ID, RPL_STR},
};
}
\ No newline at end of file
diff --git a/src/graph/node_operators_binary.h b/src/graph/node_operators_binary.h
index 4f84f631..3eaace1f 100644
--- a/src/graph/node_operators_binary.h
+++ b/src/graph/node_operators_binary.h
@@ -41,10 +41,15 @@ private:
public:
template <typename... Args>
- DotNodeOp(
- Expr a, Expr b, bool transA, bool transB, float scalar, Args... args)
- : NaryNodeOp(
- {a, b}, keywords::shape = newShape(a, b, transA, transB), args...),
+ DotNodeOp(Expr a,
+ Expr b,
+ bool transA,
+ bool transB,
+ float scalar,
+ Args... args)
+ : NaryNodeOp({a, b},
+ keywords::shape = newShape(a, b, transA, transB),
+ args...),
transA_(transA),
transB_(transB),
scalar_(scalar) {}
@@ -182,10 +187,15 @@ private:
public:
template <typename... Args>
- DotBatchedNodeOp(
- Expr a, Expr b, bool transA, bool transB, float scalar, Args... args)
- : NaryNodeOp(
- {a, b}, keywords::shape = newShape(a, b, transA, transB), args...),
+ DotBatchedNodeOp(Expr a,
+ Expr b,
+ bool transA,
+ bool transB,
+ float scalar,
+ Args... args)
+ : NaryNodeOp({a, b},
+ keywords::shape = newShape(a, b, transA, transB),
+ args...),
transA_(transA),
transB_(transB),
scalar_(scalar) {}
diff --git a/src/graph/node_operators_unary.h b/src/graph/node_operators_unary.h
index f42b7eed..7455d7f5 100644
--- a/src/graph/node_operators_unary.h
+++ b/src/graph/node_operators_unary.h
@@ -237,8 +237,11 @@ struct SwishNodeOp : public UnaryNodeOp {
}
NodeOps backwardOps() {
- return {NodeOp(
- Add(_1 * (_3 + Sigma(_2) * (1.f - _3)), child(0)->grad(), adj_, child(0)->val(), val_))};
+ return {NodeOp(Add(_1 * (_3 + Sigma(_2) * (1.f - _3)),
+ child(0)->grad(),
+ adj_,
+ child(0)->val(),
+ val_))};
}
const std::string type() { return "swish"; }
diff --git a/src/kernels/sparse.cu b/src/kernels/sparse.cu
index adc9eede..1d104474 100644
--- a/src/kernels/sparse.cu
+++ b/src/kernels/sparse.cu
@@ -7,8 +7,11 @@ namespace marian {
namespace sparse {
-void multiply(
- Ptr<CSR> C, const Ptr<CSR> A, const Ptr<CSR> B, bool transA, bool transB) {
+void multiply(Ptr<CSR> C,
+ const Ptr<CSR> A,
+ const Ptr<CSR> B,
+ bool transA,
+ bool transB) {
cudaSetDevice(C->getDevice());
int nnzTotal;
C->allocRowIndices(A->rows());
@@ -130,8 +133,11 @@ void LfaForward(Tensor out, Tensor logits, Tensor att, Ptr<CSR> sparseLf) {
sparseLfa->toTensor(out);
}
-__global__ void gCollapseAtt(
- float* out, const float* in, int batch, int srcWords, int nonzeros) {
+__global__ void gCollapseAtt(float* out,
+ const float* in,
+ int batch,
+ int srcWords,
+ int nonzeros) {
for(int bid = 0; bid < nonzeros; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < nonzeros) {
diff --git a/src/kernels/sparse.h b/src/kernels/sparse.h
index 5ad0112b..625ebe4b 100644
--- a/src/kernels/sparse.h
+++ b/src/kernels/sparse.h
@@ -197,8 +197,11 @@ public:
}
};
-void multiply(
- Ptr<CSR>, const Ptr<CSR>, const Ptr<CSR>, bool = false, bool = false);
+void multiply(Ptr<CSR>,
+ const Ptr<CSR>,
+ const Ptr<CSR>,
+ bool = false,
+ bool = false);
void LfaForward(Tensor out, Tensor logits, Tensor att, Ptr<CSR> sparseLf);
diff --git a/src/kernels/tensor_operators.cu b/src/kernels/tensor_operators.cu
index 0281163c..d88c8383 100644
--- a/src/kernels/tensor_operators.cu
+++ b/src/kernels/tensor_operators.cu
@@ -17,8 +17,7 @@ __device__ inline float stableLogit(float x) {
if(x >= 0) {
float z = expf(-x);
return 1.0 / (1.0 + z);
- }
- else {
+ } else {
float z = expf(x);
return z / (1.0 + z);
}
@@ -170,7 +169,6 @@ void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) {
SplitCont(outputs, in, ax);
}
-
__global__ void gTranspose4D(float* out,
ShapeGPU outShape,
const float* in,
@@ -955,7 +953,6 @@ __global__ void gGRUFastForward(float* out,
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
-
float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
@@ -1526,8 +1523,11 @@ __global__ void gLNormalization(float* out,
}
}
-void LayerNormalization(
- Tensor out, Tensor in, Tensor gamma, Tensor beta, float eps) {
+void LayerNormalization(Tensor out,
+ Tensor in,
+ Tensor gamma,
+ Tensor beta,
+ float eps) {
cudaSetDevice(out->getDevice());
int rows = in->shape()[0] * in->shape()[2] * in->shape()[3];
diff --git a/src/kernels/tensor_operators.h b/src/kernels/tensor_operators.h
index 17c18cfb..4ca2386f 100644
--- a/src/kernels/tensor_operators.h
+++ b/src/kernels/tensor_operators.h
@@ -367,8 +367,11 @@ __global__ void gAdd1R3(Functor functor,
}
template <class Functor>
-void Add(
- Functor functor, Tensor out, Tensor in1, Tensor in2, float scale = 1.0) {
+void Add(Functor functor,
+ Tensor out,
+ Tensor in1,
+ Tensor in2,
+ float scale = 1.0) {
cudaSetDevice(out->getDevice());
auto full = out->shape();
@@ -427,8 +430,11 @@ void Add(
}
template <class Functor>
-void Reduce(
- Functor functor, Tensor out, Tensor in1, Tensor in2, float scale = 1.0) {
+void Reduce(Functor functor,
+ Tensor out,
+ Tensor in1,
+ Tensor in2,
+ float scale = 1.0) {
out->set(0);
Add(functor, out, in1, in2, scale);
}
@@ -1116,8 +1122,11 @@ void AttBack(Tensor gva,
Tensor coverage,
Tensor adj);
-void LayerNormalization(
- Tensor out, Tensor in, Tensor gamma, Tensor beta, float eps = 1e-9);
+void LayerNormalization(Tensor out,
+ Tensor in,
+ Tensor gamma,
+ Tensor beta,
+ float eps = 1e-9);
void LayerNormalizationGrad(Tensor gradX,
Tensor gradGamma,
Tensor gradBeta,
diff --git a/src/kernels/thrust_functions.h b/src/kernels/thrust_functions.h
index b49f0983..67f37a13 100644
--- a/src/kernels/thrust_functions.h
+++ b/src/kernels/thrust_functions.h
@@ -37,8 +37,7 @@ struct unary_sigma : public thrust::unary_function<T, T> {
if(x >= 0) {
float z = expf(-x);
return 1.0 / (1.0 + z);
- }
- else {
+ } else {
float z = expf(x);
return z / (1.0 + z);
}
diff --git a/src/models/transformer.h b/src/models/transformer.h
index 0a4bded0..d3c9f406 100644
--- a/src/models/transformer.h
+++ b/src/models/transformer.h
@@ -173,8 +173,8 @@ public:
int dimBeamQ = q->shape()[3];
int dimBeamK = k->shape()[3];
if(dimBeamQ != dimBeamK) {
- k = concatenate(std::vector<Expr>(dimBeamQ, k), axis=3);
- v = concatenate(std::vector<Expr>(dimBeamQ, v), axis=3);
+ k = concatenate(std::vector<Expr>(dimBeamQ, k), axis = 3);
+ v = concatenate(std::vector<Expr>(dimBeamQ, v), axis = 3);
}
auto weights = softmax(bdot(q, k, false, true, scale) + mask);
@@ -245,7 +245,7 @@ public:
Expr output;
if(outputs.size() > 1)
- output = concatenate(outputs, axis=1);
+ output = concatenate(outputs, axis = 1);
else
output = outputs.front();
@@ -550,7 +550,8 @@ public:
for(int i = 1; i <= opt<int>("dec-depth"); ++i) {
auto values = query;
if(prevDecoderStates.size() > 0)
- values = concatenate({prevDecoderStates[i - 1].output, query}, axis=0);
+ values
+ = concatenate({prevDecoderStates[i - 1].output, query}, axis = 0);
decoderStates.push_back({values, nullptr});
diff --git a/src/rnn/types.h b/src/rnn/types.h
index e73513e2..9e288d5a 100644
--- a/src/rnn/types.h
+++ b/src/rnn/types.h
@@ -243,7 +243,7 @@ public:
virtual std::vector<Expr> getLazyInputs(Ptr<rnn::RNN> parent) {
ABORT_IF(!stackables_[0]->is<Cell>(),
- "First stackable should be of type Cell");
+ "First stackable should be of type Cell");
return stackables_[0]->as<Cell>()->getLazyInputs(parent);
}
diff --git a/src/training/dropper.h b/src/training/dropper.h
index 2b6a4ab1..0c19d327 100644
--- a/src/training/dropper.h
+++ b/src/training/dropper.h
@@ -12,8 +12,11 @@
namespace marian {
-__global__ void grad_drop(
- float* data, float* tmp, float* errors, float cut_off, int max_size) {
+__global__ void grad_drop(float* data,
+ float* tmp,
+ float* errors,
+ float cut_off,
+ int max_size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= max_size)
return;
@@ -63,8 +66,11 @@ __global__ void buildIndices(float* denseData,
}
}
-__global__ void randomSampling(
- float* originalData, float* data, int size, int scale, int fullSize) {
+__global__ void randomSampling(float* originalData,
+ float* data,
+ int size,
+ int scale,
+ int fullSize) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= size)
return;
@@ -78,8 +84,11 @@ class GradientDropBase {
int step;
int _device;
- void grad_drop_do(
- float* data, float* errors, float* tmp, int len, float rate) {
+ void grad_drop_do(float* data,
+ float* errors,
+ float* tmp,
+ int len,
+ float rate) {
int threads = 512;
int blocks = 1 + len / threads;
cudaSetDevice(_device);
diff --git a/src/training/validator.cpp b/src/training/validator.cpp
index 4fbd8672..60ce60cd 100644
--- a/src/training/validator.cpp
+++ b/src/training/validator.cpp
@@ -3,7 +3,8 @@
namespace marian {
std::vector<Ptr<Validator<data::Corpus>>> Validators(
- std::vector<Ptr<Vocab>> vocabs, Ptr<Config> config) {
+ std::vector<Ptr<Vocab>> vocabs,
+ Ptr<Config> config) {
std::vector<Ptr<Validator<data::Corpus>>> validators;
auto validMetrics = config->get<std::vector<std::string>>("valid-metrics");
diff --git a/src/training/validator.h b/src/training/validator.h
index 12454997..ec803d5d 100644
--- a/src/training/validator.h
+++ b/src/training/validator.h
@@ -305,5 +305,6 @@ protected:
* @return Vector of validator objects
*/
std::vector<Ptr<Validator<data::Corpus>>> Validators(
- std::vector<Ptr<Vocab>> vocabs, Ptr<Config> config);
+ std::vector<Ptr<Vocab>> vocabs,
+ Ptr<Config> config);
}