
gitlab.xiph.org/xiph/opus.git
author     Jean-Marc Valin <jmvalin@amazon.com>  2023-12-22 05:30:53 +0300
committer  Jean-Marc Valin <jmvalin@amazon.com>  2023-12-22 05:30:53 +0300
commit     bd710e97f31cbd001c7f13e2c5ece01a3ed487d6 (patch)
tree       a57707a1ccca3685b78170af123e9b5611750c39
parent     b923fd1e2811a4bf6a4ea1461a550c8d15143f01 (diff)
C code for packet loss simulator
-rw-r--r--  dnn/lossgen.c                 69
-rw-r--r--  dnn/lossgen.h                 30
-rw-r--r--  dnn/nnet.h                     1
-rw-r--r--  dnn/torch/lossgen/README.md    2
4 files changed, 101 insertions(+), 1 deletion(-)
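
For orientation, the new API (declared in dnn/lossgen.h below) is a state struct plus three calls. The following is a minimal sketch of the intended per-packet call pattern, not part of the patch; it assumes built-in weights (USE_WEIGHTS_FILE not defined), arch 0 (the generic C kernels, as in the disabled test harness in lossgen.c), and that percent_loss is expressed as a fraction (0.1f = 10%), which is an assumption rather than something this patch documents.

    #include <stdio.h>
    #include "lossgen.h"

    int main(void) {
       LossGenState st;
       int i, lost = 0;
       lossgen_init(&st);  /* zero the state and load the built-in weights */
       for (i = 0; i < 1000; i++) {
          /* one 0/1 decision per packet; 0.1f assumed to mean 10% loss */
          lost += sample_loss(&st, 0.1f, 0);
       }
       printf("realized loss: %.1f%%\n", 100.f*lost/1000);
       return 0;
    }
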
diff --git a/dnn/lossgen.c b/dnn/lossgen.c
new file mode 100644
index 00000000..15505873
--- /dev/null
+++ b/dnn/lossgen.c
@@ -0,0 +1,69 @@
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <math.h>
+#include <stdlib.h>  /* rand(), RAND_MAX */
+#include "lossgen.h"
+#include "os_support.h"
+#include "nnet.h"
+#include "lpcnet_private.h"
+
+/* Sample one loss decision (1 = packet lost, 0 = received), conditioned on
+   the previous decision and the target loss rate. */
+int sample_loss(
+ LossGenState *st,
+ float percent_loss,
+ int arch
+ )
+{
+ float input[2];
+ float tmp[LOSSGEN_DENSE_IN_OUT_SIZE];
+ float out;
+ int loss;
+ LossGen *model = &st->model;
+ input[0] = st->last_loss;
+ input[1] = percent_loss;
+ /* Small conditioning net: dense tanh in, two GRU layers, sigmoid out.
+    The output is the probability that the current packet is lost. */
+ compute_generic_dense(&model->lossgen_dense_in, tmp, input, ACTIVATION_TANH, arch);
+ compute_generic_gru(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, tmp, arch);
+ compute_generic_gru(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, arch);
+ compute_generic_dense(&model->lossgen_dense_out, &out, st->gru2_state, ACTIVATION_SIGMOID, arch);
+ /* Bernoulli draw against the predicted loss probability. */
+ loss = (float)rand()/RAND_MAX < out;
+ st->last_loss = loss;
+ return loss;
+}
+
+
+/* Initialize the generator state; unless USE_WEIGHTS_FILE is defined,
+   this also loads the built-in weights. */
+void lossgen_init(LossGenState *st)
+{
+ int ret;
+ OPUS_CLEAR(st, 1);
+#ifndef USE_WEIGHTS_FILE
+ ret = init_lossgen(&st->model, lossgen_arrays);
+#else
+ ret = 0;
+#endif
+ celt_assert(ret == 0);
+}
+
+/* Load model weights from a binary blob (for USE_WEIGHTS_FILE builds).
+   Returns 0 on success, -1 on failure. */
+int lossgen_load_model(LossGenState *st, const unsigned char *data, int len) {
+ WeightArray *list;
+ int ret;
+ parse_weights(&list, data, len);
+ ret = init_lossgen(&st->model, list);
+ opus_free(list);
+ if (ret == 0) return 0;
+ else return -1;
+}
+
+/* Disabled test harness: prints one 0/1 loss decision per line for a
+   given target loss rate. */
+#if 0
+#include <stdio.h>
+int main(int argc, char **argv) {
+ int i, N;
+ float p;
+ LossGenState st;
+ if (argc != 3) {
+    fprintf(stderr, "usage: %s <loss_rate> <num_packets>\n", argv[0]);
+    return 1;
+ }
+ lossgen_init(&st);
+ p = atof(argv[1]);
+ N = atoi(argv[2]);
+ for (i=0;i<N;i++) {
+    printf("%d\n", sample_loss(&st, p, 0));
+ }
+ return 0;
+}
+#endif
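
Because the two GRU states persist across calls, the generator can produce bursty (correlated) losses rather than independent drops. A small illustrative helper for measuring that burstiness in a generated trace; the function below is a sketch for analysis purposes and is not part of this patch:

    /* Mean length of runs of consecutive 1s in a 0/1 loss trace
     * (illustrative helper, not part of this patch). */
    static float mean_burst_len(const int *trace, int n) {
       int i, bursts = 0, lost = 0;
       for (i = 0; i < n; i++) {
          if (trace[i]) {
             lost++;
             if (i == 0 || !trace[i-1]) bursts++;  /* a new burst starts here */
          }
       }
       return bursts ? (float)lost/bursts : 0.f;
    }
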
diff --git a/dnn/lossgen.h b/dnn/lossgen.h
new file mode 100644
index 00000000..cb0460ae
--- /dev/null
+++ b/dnn/lossgen.h
@@ -0,0 +1,30 @@
+#ifndef LOSSGEN_H
+#define LOSSGEN_H
+
+
+#include "lossgen_data.h"
+
+/* Persistent state: model weights plus the recurrent GRU state and the
+   previous loss decision. */
+typedef struct {
+ LossGen model;
+ float gru1_state[LOSSGEN_GRU1_STATE_SIZE];
+ float gru2_state[LOSSGEN_GRU2_STATE_SIZE];
+ int last_loss;
+} LossGenState;
+
+
+/* Initialize the generator state (loads built-in weights unless
+   USE_WEIGHTS_FILE is defined). */
+void lossgen_init(LossGenState *st);
+
+/* Load model weights from a binary blob; returns 0 on success, -1 on failure. */
+int lossgen_load_model(LossGenState *st, const unsigned char *data, int len);
+
+/* Draw one loss decision (1 = lost) for the next packet. */
+int sample_loss(
+ LossGenState *st,
+ float percent_loss,
+ int arch
+ );
+
+#endif
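
In USE_WEIGHTS_FILE builds, the model is loaded at run time through lossgen_load_model(). A minimal caller-side sketch, assuming the weight blob has already been written to disk; the file name and helper below are illustrative, not defined by this patch:

    #include <stdio.h>
    #include <stdlib.h>
    #include "lossgen.h"

    /* Read a weight blob from disk and hand it to lossgen_load_model().
     * "lossgen_data.bin" is an illustrative name, not defined by this patch. */
    static int load_lossgen_weights(LossGenState *st, const char *path) {
       FILE *f = fopen(path, "rb");
       unsigned char *data = NULL;
       long len;
       int ret = -1;
       if (!f) return -1;
       fseek(f, 0, SEEK_END);
       len = ftell(f);
       fseek(f, 0, SEEK_SET);
       if (len > 0) data = malloc(len);
       if (data && fread(data, 1, len, f) == (size_t)len) {
          ret = lossgen_load_model(st, data, (int)len);
       }
       free(data);
       fclose(f);
       return ret;
    }

    /* Usage: load_lossgen_weights(&st, "lossgen_data.bin"); */
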
diff --git a/dnn/nnet.h b/dnn/nnet.h
index a2eaad82..7eb7a57b 100644
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -147,6 +147,7 @@ extern const WeightArray rdovaedec_arrays[];
extern const WeightArray fwgan_arrays[];
extern const WeightArray fargan_arrays[];
extern const WeightArray pitchdnn_arrays[];
+extern const WeightArray lossgen_arrays[];
int linear_init(LinearLayer *layer, const WeightArray *arrays,
const char *bias,
diff --git a/dnn/torch/lossgen/README.md b/dnn/torch/lossgen/README.md
index 26abc9eb..55c1b442 100644
--- a/dnn/torch/lossgen/README.md
+++ b/dnn/torch/lossgen/README.md
@@ -7,7 +7,7 @@ to build a generative model for packet loss.
We use the training data provided for the Audio Deep Packet Loss Concealment Challenge, which is available at:
-http://plcchallenge2022pub.blob.core.windows.net/plcchallengearchive/test\_train.tar.gz
+http://plcchallenge2022pub.blob.core.windows.net/plcchallengearchive/test_train.tar.gz
To create the training data, run: