gitlab.xiph.org/xiph/opus.git
Diffstat (limited to 'dnn/nnet.c')
-rw-r--r--  dnn/nnet.c  |  72
1 file changed, 0 insertions(+), 72 deletions(-)
diff --git a/dnn/nnet.c b/dnn/nnet.c
index 3275b534..8ac4518e 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -115,78 +115,6 @@ void compute_glu(const LinearLayer *layer, float *output, const float *input, in
}
}
-void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input, int arch)
-{
- LinearLayer matrix;
- celt_assert(input != output);
- matrix.bias = layer->bias;
- matrix.subias = NULL;
- matrix.float_weights = layer->input_weights;
- matrix.weights = NULL;
- matrix.weights_idx = NULL;
- matrix.diag = NULL;
- matrix.nb_inputs = layer->nb_inputs;
- matrix.nb_outputs = layer->nb_neurons;
- matrix.scale = NULL;
- compute_linear(&matrix, output, input, arch);
- compute_activation(output, output, layer->nb_neurons, layer->activation, arch);
-}
-
-#ifdef USE_SU_BIAS
-#define bias_type subias
-#else
-#define bias_type bias
-#endif
-#define MAX_IDX_SIZE 8192
-
-void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch)
-{
- LinearLayer in_matrix, rec_matrix;
- int i, M, N;
- float bias[3*MAX_RNN_NEURONS_ALL];
- float scale[3*MAX_RNN_NEURONS_ALL];
- M = gru->nb_inputs;
- N = gru->nb_neurons;
-
- in_matrix.bias = bias;
- in_matrix.diag = NULL;
- in_matrix.nb_inputs = M;
- in_matrix.nb_outputs = 3*N;
- in_matrix.subias = bias;
-#ifdef DISABLE_DOT_PROD
- for (i=0;i<3*N;i++) bias[i] = gru->bias[i] + gru_b_condition[i];
- in_matrix.scale = NULL;
- in_matrix.float_weights = gru->input_weights;
- in_matrix.weights = NULL;
-#else
- for (i=0;i<3*N;i++) bias[i] = gru->bias_type[i] + gru_b_condition[i];
- for (i=0;i<3*N;i++) scale[i] = SCALE_1;
- in_matrix.scale = scale;
- in_matrix.weights = gru->input_weights;
- in_matrix.float_weights = NULL;
-#endif
- in_matrix.weights_idx = gru->input_weights_idx;
-
- rec_matrix.bias = &gru->bias[3*N];
- rec_matrix.diag = NULL;
- rec_matrix.nb_inputs = N;
- rec_matrix.nb_outputs = 3*N;
- rec_matrix.scale = scale;
- rec_matrix.subias = &gru->subias[3*N];
-#ifdef DISABLE_DOT_PROD
- rec_matrix.scale = NULL;
- rec_matrix.float_weights = gru->recurrent_weights;
- rec_matrix.weights = NULL;
-#else
- rec_matrix.scale = scale;
- rec_matrix.weights = gru->recurrent_weights;
- rec_matrix.float_weights = NULL;
-#endif
- rec_matrix.weights_idx = NULL;
- compute_generic_gru(&in_matrix, &rec_matrix, state, input, arch);
-}
-
-
#define MAX_CONV_INPUTS_ALL DRED_MAX_CONV_INPUTS
void compute_generic_conv1d(const LinearLayer *layer, float *output, float *mem, const float *input, int input_size, int activation, int arch)
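
For reference, the removed _lpcnet_compute_dense() was a thin wrapper that packed a DenseLayer into a LinearLayer and then chained compute_linear() with compute_activation(). A minimal sketch of that same two-call pattern follows; the LinearLayer/DenseLayer field names and celt_assert() are taken from the removed code above, while the helper name dense_via_linear and the zero-initialization of the unused fields are illustrative assumptions, not part of this commit.

/* Sketch only: the generic LinearLayer path that the removed
 * _lpcnet_compute_dense() wrapper used. Unused fields (subias, weights,
 * weights_idx, diag, scale) are left NULL by the {0} initializer,
 * selecting the plain float-weight path with no sparse index and no
 * quantization scale. */
static void dense_via_linear(const DenseLayer *layer, float *output,
                             const float *input, int arch)
{
   LinearLayer matrix = {0};
   celt_assert(input != output);                 /* no in-place operation */
   matrix.bias = layer->bias;
   matrix.float_weights = layer->input_weights;  /* float weights only */
   matrix.nb_inputs = layer->nb_inputs;
   matrix.nb_outputs = layer->nb_neurons;
   compute_linear(&matrix, output, input, arch);
   compute_activation(output, output, layer->nb_neurons, layer->activation, arch);
}

The removed compute_gruB() followed the same idea with the subias/weights/scale fields filled in for the quantized dot-product path, feeding two such LinearLayer descriptions to compute_generic_gru(), so the generic entry points cover both the float and the quantized weight layouts.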