Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.xiph.org/xiph/opus.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJean-Marc Valin <jmvalin@amazon.com>2023-07-25 04:07:46 +0300
committerJean-Marc Valin <jmvalin@amazon.com>2023-07-28 02:54:10 +0300
commit60d67b11126e87ec41ae09b34aa4a133a3af5f7c (patch)
tree6b7086e7f9ecce32a86cdd0afaac7e21bf288cd1
parent4171532c80448a36205327e2903d145a5c45e9d9 (diff)
New compute_generic_conv1d()
-rw-r--r--dnn/nnet.c21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/dnn/nnet.c b/dnn/nnet.c
index c8b2f0c0..2af1743e 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -333,15 +333,22 @@ void compute_sparse_gru(const SparseGRULayer *gru, float *state, const float *in
#define MAX_CONV_INPUTS_ALL IMAX(MAX_CONV_INPUTS, DRED_MAX_CONV_INPUTS)
+void compute_generic_conv1d(const LinearLayer *layer, float *output, float *mem, const float *input, int input_size, int activation)
+{
+ float tmp[MAX_CONV_INPUTS_ALL];
+ celt_assert(input != output);
+ celt_assert(layer->nb_inputs <= MAX_CONV_INPUTS_ALL);
+ OPUS_COPY(tmp, mem, layer->nb_inputs-input_size);
+ OPUS_COPY(&tmp[layer->nb_inputs-input_size], input, input_size);
+ compute_linear(layer, output, tmp);
+ compute_activation(output, output, layer->nb_outputs, activation);
+ OPUS_COPY(mem, &tmp[input_size], layer->nb_inputs-input_size);
+}
+
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input)
{
LinearLayer matrix;
int N, M;
- float tmp[MAX_CONV_INPUTS_ALL];
- celt_assert(input != output);
- celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS_ALL);
- OPUS_COPY(tmp, mem, layer->nb_inputs*(layer->kernel_size-1));
- OPUS_COPY(&tmp[layer->nb_inputs*(layer->kernel_size-1)], input, layer->nb_inputs);
M = layer->nb_inputs*layer->kernel_size;
N = layer->nb_neurons;
matrix.bias = layer->bias;
@@ -353,9 +360,7 @@ void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const f
matrix.nb_inputs = M;
matrix.nb_outputs = N;
matrix.scale = NULL;
- compute_linear(&matrix, output, tmp);
- compute_activation(output, output, N, layer->activation);
- OPUS_COPY(mem, &tmp[layer->nb_inputs], layer->nb_inputs*(layer->kernel_size-1));
+ compute_generic_conv1d(&matrix, output, mem, input, layer->nb_inputs, layer->activation);
}
void compute_embedding(const EmbeddingLayer *layer, float *output, int input)