gitlab.xiph.org/xiph/opus.git
author    Jean-Marc Valin <jmvalin@amazon.com>  2023-12-22 07:05:40 +0300
committer Jean-Marc Valin <jmvalin@amazon.com>  2023-12-22 07:05:40 +0300
commit    caca188b5a0275b1c04baf8fcc9798b900692a2c (patch)
tree      fe5295a192530d7dc6b492ae98efc08683051cd2
parent    bd710e97f31cbd001c7f13e2c5ece01a3ed487d6 (diff)
Make loss simulator standalone
 dnn/lossgen.c        | 61 ++++++++++++-
 dnn/lpcnet_private.h |  1 -
 dnn/nnet.h           |  1 +
 3 files changed, 57 insertions(+), 6 deletions(-)
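
This change makes dnn/lossgen.c buildable on its own: private copies of the generic dense and GRU kernels are added under *_lossgen names, with RTCD pinned to the plain-C path, so the simulator no longer needs to link against nnet.c and the rest of the DNN objects. As a reading aid (not part of the patch), the copied GRU routine computes the standard GRU cell update; writing W_i and W_r for the input and recurrent LinearLayer transforms (bias terms folded into the W_i product by compute_linear) and sigma for the sigmoid, the arithmetic below corresponds to:

\begin{aligned}
z_t &= \sigma\big((W_i x_t)_z + (W_r h_{t-1})_z\big) \\
r_t &= \sigma\big((W_i x_t)_r + (W_r h_{t-1})_r\big) \\
\tilde{h}_t &= \tanh\big((W_i x_t)_h + r_t \odot (W_r h_{t-1})_h\big) \\
h_t &= z_t \odot h_{t-1} + (1 - z_t) \odot \tilde{h}_t
\end{aligned}

Note the sign convention: z_t weights the old state, so a gate value near 1 means "keep the previous state".
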
diff --git a/dnn/lossgen.c b/dnn/lossgen.c
index 15505873..fa1ee71d 100644
--- a/dnn/lossgen.c
+++ b/dnn/lossgen.c
@@ -6,7 +6,53 @@
#include "lossgen.h"
#include "os_support.h"
#include "nnet.h"
-#include "lpcnet_private.h"
+
+/* Disable RTCD for this. */
+#define RTCD_ARCH c
+
+#include "nnet_arch.h"
+
+#define MAX_RNN_NEURONS_ALL IMAX(LOSSGEN_GRU1_STATE_SIZE, LOSSGEN_GRU2_STATE_SIZE)
+
+/* These two functions are copied from nnet.c to make sure we don't have linking issues. */
+void compute_generic_gru_lossgen(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch)
+{
+   int i;
+   int N;
+   float zrh[3*MAX_RNN_NEURONS_ALL];
+   float recur[3*MAX_RNN_NEURONS_ALL];
+   float *z;
+   float *r;
+   float *h;
+   celt_assert(3*recurrent_weights->nb_inputs == recurrent_weights->nb_outputs);
+   celt_assert(input_weights->nb_outputs == recurrent_weights->nb_outputs);
+   N = recurrent_weights->nb_inputs;
+   z = zrh;
+   r = &zrh[N];
+   h = &zrh[2*N];
+   celt_assert(recurrent_weights->nb_outputs <= 3*MAX_RNN_NEURONS_ALL);
+   celt_assert(in != state);
+   compute_linear(input_weights, zrh, in, arch);
+   compute_linear(recurrent_weights, recur, state, arch);
+   for (i=0;i<2*N;i++)
+      zrh[i] += recur[i];
+   compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID, arch);
+   for (i=0;i<N;i++)
+      h[i] += recur[2*N+i]*r[i];
+   compute_activation(h, h, N, ACTIVATION_TANH, arch);
+   for (i=0;i<N;i++)
+      h[i] = z[i]*state[i] + (1-z[i])*h[i];
+   for (i=0;i<N;i++)
+      state[i] = h[i];
+}
+
+
+void compute_generic_dense_lossgen(const LinearLayer *layer, float *output, const float *input, int activation, int arch)
+{
+   compute_linear(layer, output, input, arch);
+   compute_activation(output, output, layer->nb_outputs, activation, arch);
+}
+
 int sample_loss(
     LossGenState *st,
@@ -21,10 +67,10 @@ int sample_loss(
    LossGen *model = &st->model;
    input[0] = st->last_loss;
    input[1] = percent_loss;
-   compute_generic_dense(&model->lossgen_dense_in, tmp, input, ACTIVATION_TANH, arch);
-   compute_generic_gru(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, tmp, arch);
-   compute_generic_gru(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, arch);
-   compute_generic_dense(&model->lossgen_dense_out, &out, st->gru2_state, ACTIVATION_SIGMOID, arch);
+   compute_generic_dense_lossgen(&model->lossgen_dense_in, tmp, input, ACTIVATION_TANH, arch);
+   compute_generic_gru_lossgen(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, tmp, arch);
+   compute_generic_gru_lossgen(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, arch);
+   compute_generic_dense_lossgen(&model->lossgen_dense_out, &out, st->gru2_state, ACTIVATION_SIGMOID, arch);
    loss = (float)rand()/RAND_MAX < out;
    st->last_loss = loss;
    return loss;
@@ -41,6 +87,7 @@ void lossgen_init(LossGenState *st)
    ret = 0;
 #endif
    celt_assert(ret == 0);
+   (void)ret;
 }

 int lossgen_load_model(LossGenState *st, const unsigned char *data, int len) {
@@ -59,6 +106,10 @@ int main(int argc, char **argv) {
    int i, N;
    float p;
    LossGenState st;
+   if (argc!=3) {
+      fprintf(stderr, "usage: lossgen <percentage> <length>\n");
+      return 1;
+   }
    lossgen_init(&st);
    p = atof(argv[1]);
    N = atoi(argv[2]);
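
A note on the new usage check: the standalone tool is invoked as, e.g., lossgen 10 500, a loss percentage followed by a length. Given N = atoi(argv[2]) and the loop index i declared next to it, <length> is presumably the number of loss decisions to draw; the rest of main() falls outside this hunk, so that reading is inferred from context.
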
diff --git a/dnn/lpcnet_private.h b/dnn/lpcnet_private.h
index e1e3e9c6..7fb8123a 100644
--- a/dnn/lpcnet_private.h
+++ b/dnn/lpcnet_private.h
@@ -80,5 +80,4 @@ void lpcnet_synthesize_blend_impl(LPCNetState *lpcnet, const opus_int16 *pcm_in,
 void run_frame_network(LPCNetState *lpcnet, float *gru_a_condition, float *gru_b_condition, float *lpc, const float *features);
-int parse_weights(WeightArray **list, const unsigned char *data, int len);
 #endif
diff --git a/dnn/nnet.h b/dnn/nnet.h
index 7eb7a57b..425a8dfe 100644
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -138,6 +138,7 @@ void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *
 void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch);
+int parse_weights(WeightArray **list, const unsigned char *data, int len);
 extern const WeightArray lpcnet_arrays[];
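
The two header edits complete the decoupling: parse_weights(), which lossgen.c still needs, moves its declaration from lpcnet_private.h (no longer included) to nnet.h (still included). To make the copied GRU kernel's behavior concrete without LinearLayer and compute_linear(), here is a minimal self-contained C sketch of the same update; MiniGRU, the toy sizes, and mini_gru_step() are hypothetical illustrations, not Opus code:

/* Hypothetical stand-alone sketch (not Opus code): a plain-C version of the
 * GRU step that compute_generic_gru_lossgen() performs via compute_linear(). */
#include <math.h>
#include <stdio.h>

#define N_IN    2   /* lossgen feeds 2 inputs: last loss flag and loss rate */
#define N_STATE 4   /* toy state size; the real sizes come from the model */

typedef struct {
   float w_in[3*N_STATE][N_IN];     /* input weights, rows ordered z, r, h */
   float w_rec[3*N_STATE][N_STATE]; /* recurrent weights, same ordering */
   float bias[3*N_STATE];
} MiniGRU;

static float sigmoid(float x) { return 1.f/(1.f + expf(-x)); }

/* One update: state <- z*state + (1-z)*tanh(g_in + r*g_rec), as in the diff. */
static void mini_gru_step(const MiniGRU *g, float *state, const float *in)
{
   int i, j;
   float zrh[3*N_STATE];   /* input contribution (compute_linear stand-in) */
   float recur[3*N_STATE]; /* recurrent contribution */
   for (i=0;i<3*N_STATE;i++) {
      zrh[i] = g->bias[i];
      for (j=0;j<N_IN;j++) zrh[i] += g->w_in[i][j]*in[j];
      recur[i] = 0.f;
      for (j=0;j<N_STATE;j++) recur[i] += g->w_rec[i][j]*state[j];
   }
   /* z and r gates: sigmoid of summed contributions (first 2*N_STATE lanes). */
   for (i=0;i<2*N_STATE;i++) zrh[i] = sigmoid(zrh[i] + recur[i]);
   /* Candidate state and interpolation, matching the copied function. */
   for (i=0;i<N_STATE;i++) {
      float h = tanhf(zrh[2*N_STATE+i] + recur[2*N_STATE+i]*zrh[N_STATE+i]);
      state[i] = zrh[i]*state[i] + (1.f - zrh[i])*h;
   }
}

int main(void)
{
   MiniGRU g = {{{0}}, {{0}}, {0}};   /* all-zero weights for a sanity check */
   float state[N_STATE] = {1.f, -0.5f, 0.25f, 0.f};
   const float in[N_IN] = {1.f, 0.1f}; /* e.g. last-loss flag, 10% loss rate */
   mini_gru_step(&g, state, in);
   /* Zero weights give z = 0.5 and a zero candidate, so each entry halves. */
   printf("state after one step: %f %f %f %f\n",
          state[0], state[1], state[2], state[3]);
   return 0;
}

Compile with e.g. cc sketch.c -lm. With all-zero weights the gates sit at sigmoid(0) = 0.5 and the candidate at tanh(0) = 0, so the state halves toward zero on each step, a quick sanity check on the update rule above.
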