
gitlab.xiph.org/xiph/opus.git
commit    e7c9bfbbe2cc8a49df88d5541df3c094f8aab8e1
author    Jean-Marc Valin <jmvalin@amazon.com>  2023-10-17 05:01:09 +0300
committer Jean-Marc Valin <jmvalin@amazon.com>  2023-10-17 05:01:09 +0300
tree      18e5acdebd0e8295970331d6164ce7c5f8fac8b4
parent    ca035ef1d23912ad8858bcc1833a4aad61db7853
Finish removing LPCNet
And references to nnet_data.h
-rw-r--r--  dnn/lpcnet_private.h  24
-rw-r--r--  dnn/nnet.c             5
2 files changed, 3 insertions(+), 26 deletions(-)
diff --git a/dnn/lpcnet_private.h b/dnn/lpcnet_private.h
index 4dfcadaa..badd736d 100644
--- a/dnn/lpcnet_private.h
+++ b/dnn/lpcnet_private.h
@@ -4,7 +4,6 @@
 #include <stdio.h>
 #include "freq.h"
 #include "lpcnet.h"
-#include "nnet_data.h"
 #include "plc_data.h"
 #include "kiss99.h"
 #include "pitchdnn.h"
@@ -22,28 +21,7 @@
 #define CONT_VECTORS 5

-struct LPCNetState {
-  LPCNetModel model;
-  int arch;
-  float sampling_logit_table[256];
-  kiss99_ctx rng;
-
-#define LPCNET_RESET_START nnet
-  NNetState nnet;
-  int last_exc;
-  float last_sig[LPC_ORDER];
-  float feature_buffer[NB_FEATURES*MAX_FEATURE_BUFFER_SIZE];
-  int feature_buffer_fill;
-  float last_features[NB_FEATURES];
-#if FEATURES_DELAY>0
-  float old_lpc[FEATURES_DELAY][LPC_ORDER];
-#endif
-  float gru_a_condition[3*GRU_A_STATE_SIZE];
-  float gru_b_condition[3*GRU_B_STATE_SIZE];
-  int frame_count;
-  float deemph_mem;
-  float lpc[LPC_ORDER];
-};
+#define FEATURES_DELAY 1

 struct LPCNetEncState{
   PitchDNNState pitchdnn;
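
The deleted LPCNetState held the LPCNet decoder's runtime state; the encoder still delays features, which is why FEATURES_DELAY is now pinned to 1 here rather than arriving via nnet_data.h. Below is a minimal sketch of the delay-line pattern such a constant drives, with hypothetical names and an assumed LPC_ORDER; it is not the actual encoder code.

#include <string.h>

#define FEATURES_DELAY 1
#define LPC_ORDER 16           /* assumed value, for illustration only */

/* Hypothetical ring of past LPC frames: features computed now get paired
 * with LPC coefficients from FEATURES_DELAY frames ago. */
typedef struct {
    float old_lpc[FEATURES_DELAY][LPC_ORDER];
} LpcDelayLine;

static void lpc_delay_push(LpcDelayLine *d, const float *new_lpc, float *delayed_out)
{
    /* Hand out the oldest stored frame first (shifting would overwrite it)... */
    memcpy(delayed_out, d->old_lpc[FEATURES_DELAY-1], LPC_ORDER*sizeof(float));
    /* ...age the remaining frames by one slot (a no-op when the delay is 1)... */
    memmove(d->old_lpc[1], d->old_lpc[0],
            (FEATURES_DELAY-1)*LPC_ORDER*sizeof(float));
    /* ...and store the newest frame at slot 0. */
    memcpy(d->old_lpc[0], new_lpc, LPC_ORDER*sizeof(float));
}
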
diff --git a/dnn/nnet.c b/dnn/nnet.c
index 1dc813de..7f4658e0 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -36,7 +36,6 @@
#include "arch.h"
#include "tansig_table.h"
#include "nnet.h"
-#include "nnet_data.h"
#include "dred_rdovae_constants.h"
#include "plc_data.h"
#include "os_support.h"
@@ -109,7 +108,7 @@ void compute_generic_dense(const LinearLayer *layer, float *output, const float
   compute_activation(output, output, layer->nb_outputs, activation);
 }

-#define MAX_RNN_NEURONS_ALL IMAX(IMAX(MAX_RNN_NEURONS, PLC_MAX_RNN_NEURONS), DRED_MAX_RNN_NEURONS)
+#define MAX_RNN_NEURONS_ALL IMAX(PLC_MAX_RNN_NEURONS, DRED_MAX_RNN_NEURONS)

 void compute_generic_gru(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in)
@@ -314,7 +313,7 @@ void compute_sparse_gru(const SparseGRULayer *gru, float *state, const float *in
   compute_generic_gru(&in_matrix, &rec_matrix, state, input);
 }

-#define MAX_CONV_INPUTS_ALL IMAX(MAX_CONV_INPUTS, DRED_MAX_CONV_INPUTS)
+#define MAX_CONV_INPUTS_ALL DRED_MAX_CONV_INPUTS

 void compute_generic_conv1d(const LinearLayer *layer, float *output, float *mem, const float *input, int input_size, int activation)
 {
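
The two macro simplifications above reflect that LPCNet's MAX_RNN_NEURONS and MAX_CONV_INPUTS constants no longer exist, so the compile-time maxima that size on-stack scratch buffers now only need to cover the PLC and DRED models. A minimal sketch of the pattern, assuming IMAX is a plain ternary max and using invented values for the constants:

#define IMAX(a, b) ((a) > (b) ? (a) : (b))

/* Illustrative values only; the real constants come from the model headers. */
#define PLC_MAX_RNN_NEURONS  192
#define DRED_MAX_RNN_NEURONS 256

/* Worst case across all models still in the build. */
#define MAX_RNN_NEURONS_ALL IMAX(PLC_MAX_RNN_NEURONS, DRED_MAX_RNN_NEURONS)

void gru_scratch_sketch(void)
{
    /* A GRU computes one value per neuron for each of the update, reset,
     * and candidate gates, so scratch is sized 3x the largest layer any
     * remaining model can have. */
    float gates[3*MAX_RNN_NEURONS_ALL];
    (void)gates;
}

Sizing scratch arrays with a compile-time maximum avoids per-call allocation while still guaranteeing enough room for whichever model runs; removing LPCNet from the maximum simply shrinks that worst case.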