gitlab.xiph.org/xiph/opus.git
author     Jean-Marc Valin <jmvalin@jmvalin.ca>  2024-01-19 02:16:54 +0300
committer  Jean-Marc Valin <jmvalin@jmvalin.ca>  2024-01-21 10:11:50 +0300
commit     6a9831a6b038638266165dc1e9f115678f0b330e (patch)
tree       44c1bfceee0f681950726a60e771fbfe9ccbc9bd
parent     1ddfcfd48cb87f8dc29240d705a4da78bae0eb50 (diff)
Remove run-time code for old TF2 models  (exp_plc_fix5)

No longer needed now that PLC is trained with the PyTorch stack.
-rw-r--r--  dnn/nnet.c                  72
-rw-r--r--  dnn/nnet.h                  63
-rw-r--r--  dnn/parse_lpcnet_weights.c  40
3 files changed, 0 insertions, 175 deletions
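
Note: the removed _lpcnet_compute_dense() below did nothing more than pack the old DenseLayer fields into a LinearLayer and call the generic kernels, so plain dense layers keep working through compute_generic_dense(), which nnet.h retains. A minimal sketch of that surviving path (not part of this commit; the weight pointers, sizes and activation choice are placeholders):

#include <stddef.h>
#include "nnet.h"

/* Equivalent of the removed wrapper: a dense layer is just a LinearLayer
 * holding float weights, run through the generic matrix + activation path. */
static void dense_via_generic(const float *bias, const float *weights,
                              int nb_inputs, int nb_outputs,
                              float *output, const float *input, int arch)
{
   LinearLayer matrix;
   matrix.bias = bias;
   matrix.subias = NULL;        /* only used by the quantized path */
   matrix.float_weights = weights;
   matrix.weights = NULL;       /* no quantized weights */
   matrix.weights_idx = NULL;   /* dense, not block-sparse */
   matrix.diag = NULL;
   matrix.scale = NULL;
   matrix.nb_inputs = nb_inputs;
   matrix.nb_outputs = nb_outputs;
   compute_generic_dense(&matrix, output, input, ACTIVATION_TANH, arch);
}
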
diff --git a/dnn/nnet.c b/dnn/nnet.c
index 3275b534..8ac4518e 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -115,78 +115,6 @@ void compute_glu(const LinearLayer *layer, float *output, const float *input, in
}
}
-void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input, int arch)
-{
- LinearLayer matrix;
- celt_assert(input != output);
- matrix.bias = layer->bias;
- matrix.subias = NULL;
- matrix.float_weights = layer->input_weights;
- matrix.weights = NULL;
- matrix.weights_idx = NULL;
- matrix.diag = NULL;
- matrix.nb_inputs = layer->nb_inputs;
- matrix.nb_outputs = layer->nb_neurons;
- matrix.scale = NULL;
- compute_linear(&matrix, output, input, arch);
- compute_activation(output, output, layer->nb_neurons, layer->activation, arch);
-}
-
-#ifdef USE_SU_BIAS
-#define bias_type subias
-#else
-#define bias_type bias
-#endif
-#define MAX_IDX_SIZE 8192
-
-void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch)
-{
- LinearLayer in_matrix, rec_matrix;
- int i, M, N;
- float bias[3*MAX_RNN_NEURONS_ALL];
- float scale[3*MAX_RNN_NEURONS_ALL];
- M = gru->nb_inputs;
- N = gru->nb_neurons;
-
- in_matrix.bias = bias;
- in_matrix.diag = NULL;
- in_matrix.nb_inputs = M;
- in_matrix.nb_outputs = 3*N;
- in_matrix.subias = bias;
-#ifdef DISABLE_DOT_PROD
- for (i=0;i<3*N;i++) bias[i] = gru->bias[i] + gru_b_condition[i];
- in_matrix.scale = NULL;
- in_matrix.float_weights = gru->input_weights;
- in_matrix.weights = NULL;
-#else
- for (i=0;i<3*N;i++) bias[i] = gru->bias_type[i] + gru_b_condition[i];
- for (i=0;i<3*N;i++) scale[i] = SCALE_1;
- in_matrix.scale = scale;
- in_matrix.weights = gru->input_weights;
- in_matrix.float_weights = NULL;
-#endif
- in_matrix.weights_idx = gru->input_weights_idx;
-
- rec_matrix.bias = &gru->bias[3*N];
- rec_matrix.diag = NULL;
- rec_matrix.nb_inputs = N;
- rec_matrix.nb_outputs = 3*N;
- rec_matrix.scale = scale;
- rec_matrix.subias = &gru->subias[3*N];
-#ifdef DISABLE_DOT_PROD
- rec_matrix.scale = NULL;
- rec_matrix.float_weights = gru->recurrent_weights;
- rec_matrix.weights = NULL;
-#else
- rec_matrix.scale = scale;
- rec_matrix.weights = gru->recurrent_weights;
- rec_matrix.float_weights = NULL;
-#endif
- rec_matrix.weights_idx = NULL;
- compute_generic_gru(&in_matrix, &rec_matrix, state, input, arch);
-}
-
-
#define MAX_CONV_INPUTS_ALL DRED_MAX_CONV_INPUTS
void compute_generic_conv1d(const LinearLayer *layer, float *output, float *mem, const float *input, int input_size, int activation, int arch)
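
Note: compute_gruB() above was likewise a thin wrapper; it built two LinearLayer descriptors (input and recurrent weights) from the old GRULayer and handed them to compute_generic_gru(), which stays in nnet.c. A simplified sketch of the float-weight case, ignoring the gru_b_condition bias term the wrapper folded in (layer geometry and weight pointers are placeholders):

#include "nnet.h"

/* Run one GRU update through the generic kernel: the input matrix maps
 * nb_inputs -> 3*N gate pre-activations, the recurrent matrix maps N -> 3*N,
 * and the recurrent biases live at offset 3*N in the bias/subias arrays. */
static void gru_via_generic(const float *bias, const float *subias,
                            const float *input_weights,
                            const float *recurrent_weights,
                            int nb_inputs, int nb_neurons,
                            float *state, const float *input, int arch)
{
   LinearLayer in_matrix, rec_matrix;
   const int N = nb_neurons;

   in_matrix.bias = bias;
   in_matrix.subias = subias;
   in_matrix.float_weights = input_weights;
   in_matrix.weights = NULL;
   in_matrix.weights_idx = NULL;
   in_matrix.diag = NULL;
   in_matrix.scale = NULL;
   in_matrix.nb_inputs = nb_inputs;
   in_matrix.nb_outputs = 3*N;

   rec_matrix.bias = &bias[3*N];
   rec_matrix.subias = &subias[3*N];
   rec_matrix.float_weights = recurrent_weights;
   rec_matrix.weights = NULL;
   rec_matrix.weights_idx = NULL;
   rec_matrix.diag = NULL;
   rec_matrix.scale = NULL;
   rec_matrix.nb_inputs = N;
   rec_matrix.nb_outputs = 3*N;

   compute_generic_gru(&in_matrix, &rec_matrix, state, input, arch);
}
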
diff --git a/dnn/nnet.h b/dnn/nnet.h
index 589458a6..2531489c 100644
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -31,13 +31,6 @@
#include <stddef.h>
#include "opus_types.h"
-#ifdef DISABLE_DOT_PROD
-typedef float qweight;
-#else
-typedef signed char qweight;
-#define DOT_PROD
-#endif
-
#define ACTIVATION_LINEAR 0
#define ACTIVATION_SIGMOID 1
#define ACTIVATION_TANH 2
@@ -91,40 +84,6 @@ typedef struct {
int kheight;
} Conv2dLayer;
-typedef struct {
- const float *bias;
- const float *input_weights;
- int nb_inputs;
- int nb_neurons;
- int activation;
-} DenseLayer;
-
-typedef struct {
- const float *bias;
- const float *subias;
- const qweight *input_weights;
- const int *input_weights_idx;
- const qweight *recurrent_weights;
- int nb_inputs;
- int nb_neurons;
- int activation;
- int reset_after;
-} GRULayer;
-
-typedef struct {
- const float *bias;
- const float *input_weights;
- int nb_inputs;
- int kernel_size;
- int nb_neurons;
- int activation;
-} Conv1DLayer;
-
-typedef struct {
- const float *embedding_weights;
- int nb_inputs;
- int dim;
-} EmbeddingLayer;
void compute_generic_dense(const LinearLayer *layer, float *output, const float *input, int activation, int arch);
void compute_generic_gru(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch);
@@ -134,10 +93,6 @@ void compute_glu(const LinearLayer *layer, float *output, const float *input, in
void compute_gated_activation(const LinearLayer *layer, float *output, const float *input, int activation, int arch);
-void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input, int arch);
-
-void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch);
-
int parse_weights(WeightArray **list, const unsigned char *data, int len);
@@ -169,24 +124,6 @@ int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
int ktime,
int kheight);
-int dense_init(DenseLayer *layer, const WeightArray *arrays,
- const char *bias,
- const char *input_weights,
- int nb_inputs,
- int nb_neurons,
- int activation);
-
-int gru_init(GRULayer *layer, const WeightArray *arrays,
- const char *bias,
- const char *subias,
- const char *input_weights,
- const char *input_weights_idx,
- const char *recurrent_weights,
- int nb_inputs,
- int nb_neurons,
- int activation,
- int reset_after);
-
void compute_linear_c(const LinearLayer *linear, float *out, const float *in);
void compute_activation_c(float *output, const float *input, int N, int activation);
diff --git a/dnn/parse_lpcnet_weights.c b/dnn/parse_lpcnet_weights.c
index c2108593..987fc717 100644
--- a/dnn/parse_lpcnet_weights.c
+++ b/dnn/parse_lpcnet_weights.c
@@ -176,46 +176,6 @@ int linear_init(LinearLayer *layer, const WeightArray *arrays,
return 0;
}
-
-int dense_init(DenseLayer *layer, const WeightArray *arrays,
- const char *bias,
- const char *input_weights,
- int nb_inputs,
- int nb_neurons,
- int activation)
-{
- if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
- if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
- layer->nb_inputs = nb_inputs;
- layer->nb_neurons = nb_neurons;
- layer->activation = activation;
- return 0;
-}
-
-int gru_init(GRULayer *layer, const WeightArray *arrays,
- const char *bias,
- const char *subias,
- const char *input_weights,
- const char *input_weights_idx,
- const char *recurrent_weights,
- int nb_inputs,
- int nb_neurons,
- int activation,
- int reset_after)
-{
- int total_blocks;
- if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
- if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
- if ((layer->input_weights_idx = find_idx_check(arrays, input_weights_idx, nb_inputs, 3*nb_neurons, &total_blocks)) == NULL) return 1;
- if ((layer->input_weights = find_array_check(arrays, input_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->input_weights[0]))) == NULL) return 1;
- if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, 3*nb_neurons*nb_neurons*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
- layer->nb_inputs = nb_inputs;
- layer->nb_neurons = nb_neurons;
- layer->activation = activation;
- layer->reset_after = reset_after;
- return 0;
-}
-
int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
const char *bias,
const char *float_weights,
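
Note: the removed dense_init()/gru_init() followed the same convention kept by the surviving linear_init() and conv2d_init(): each named tensor is looked up in the WeightArray list produced by parse_weights(), and its byte size is checked against the layer geometry before the pointer is stored. A standalone sketch of that size-checked lookup (hypothetical helper and struct names; the real WeightArray in nnet.h also carries a type field, and the real check is find_array_check() used above):

#include <string.h>
#include <stddef.h>

/* Hypothetical stand-in for the WeightArray entries that parse_weights()
 * produces; the real struct also records a data type. */
typedef struct {
   const char *name;
   int size;          /* payload size in bytes */
   const void *data;
} ExampleWeightArray;

/* Return the payload for `name` only if it exists and has exactly the
 * expected size -- the contract the layer init functions rely on.
 * Assumes the list ends with a NULL-name sentinel entry. */
static const void *example_find_array_check(const ExampleWeightArray *arrays,
                                            const char *name,
                                            int expected_size)
{
   int i;
   for (i = 0; arrays[i].name != NULL; i++) {
      if (strcmp(arrays[i].name, name) == 0) {
         return arrays[i].size == expected_size ? arrays[i].data : NULL;
      }
   }
   return NULL;
}
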