Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

gitlab.xiph.org/xiph/opus.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJan Buethe <jbuethe@amazon.de>2023-12-18 19:09:54 +0300
committerJan Buethe <jbuethe@amazon.de>2023-12-18 19:09:54 +0300
commitb7b1a38cd7b502f6a7e5b192c96868883dcc3a38 (patch)
tree2a95abeebd42635a8c0199b7f69992ccc1ddb151
parentd950fd01ac5dcd230985e8adc2bb56c579d724c7 (diff)
updated C and export code
-rw-r--r--dnn/nndsp.c10
-rw-r--r--dnn/nndsp.h6
-rw-r--r--dnn/osce.c13
-rw-r--r--dnn/torch/osce/export_model_weights.py10
4 files changed, 24 insertions, 15 deletions
diff --git a/dnn/nndsp.c b/dnn/nndsp.c
index d5c3dc8f..4ca0897c 100644
--- a/dnn/nndsp.c
+++ b/dnn/nndsp.c
@@ -340,7 +340,8 @@ void adashape_process_frame(
float *x_out,
const float *x_in,
const float *features,
- const LinearLayer *alpha1,
+ const LinearLayer *alpha1f,
+ const LinearLayer *alpha1t,
const LinearLayer *alpha2,
int feature_dim,
int frame_size,
@@ -350,6 +351,7 @@ void adashape_process_frame(
{
float in_buffer[ADASHAPE_MAX_INPUT_DIM + ADASHAPE_MAX_FRAME_SIZE];
float out_buffer[ADASHAPE_MAX_FRAME_SIZE];
+ float tmp_buffer[ADASHAPE_MAX_FRAME_SIZE];
int i, k;
int tenv_size;
float mean;
@@ -389,14 +391,16 @@ void adashape_process_frame(
#ifdef DEBUG_NNDSP
print_float_vector("alpha1_in", in_buffer, feature_dim + tenv_size + 1);
#endif
- compute_generic_conv1d(alpha1, out_buffer, hAdaShape->conv_alpha1_state, in_buffer, feature_dim + tenv_size + 1, ACTIVATION_LINEAR, arch);
+ compute_generic_conv1d(alpha1f, out_buffer, hAdaShape->conv_alpha1f_state, in_buffer, feature_dim, ACTIVATION_LINEAR, arch);
+ compute_generic_conv1d(alpha1t, tmp_buffer, hAdaShape->conv_alpha1t_state, tenv, tenv_size + 1, ACTIVATION_LINEAR, arch);
#ifdef DEBUG_NNDSP
print_float_vector("alpha1_out", out_buffer, frame_size);
#endif
/* compute leaky ReLU by hand. ToDo: try tanh activation */
for (i = 0; i < frame_size; i ++)
{
- in_buffer[i] = out_buffer[i] >= 0 ? out_buffer[i] : 0.2 * out_buffer[i];
+ float tmp = out_buffer[i] + tmp_buffer[i];
+ in_buffer[i] = tmp >= 0 ? tmp : 0.2 * tmp;
}
#ifdef DEBUG_NNDSP
print_float_vector("post_alpha1", in_buffer, frame_size);
diff --git a/dnn/nndsp.h b/dnn/nndsp.h
index f00094b6..6021250f 100644
--- a/dnn/nndsp.h
+++ b/dnn/nndsp.h
@@ -71,7 +71,8 @@ typedef struct {
typedef struct {
- float conv_alpha1_state[ADASHAPE_MAX_INPUT_DIM];
+ float conv_alpha1f_state[ADASHAPE_MAX_INPUT_DIM];
+ float conv_alpha1t_state[ADASHAPE_MAX_INPUT_DIM];
float conv_alpha2_state[ADASHAPE_MAX_FRAME_SIZE];
} AdaShapeState;
@@ -130,7 +131,8 @@ void adashape_process_frame(
float *x_out,
const float *x_in,
const float *features,
- const LinearLayer *alpha1,
+ const LinearLayer *alpha1f,
+ const LinearLayer *alpha1t,
const LinearLayer *alpha2,
int feature_dim,
int frame_size,
diff --git a/dnn/osce.c b/dnn/osce.c
index fb0369f7..1c57c5bc 100644
--- a/dnn/osce.c
+++ b/dnn/osce.c
@@ -633,7 +633,8 @@ static void nolace_process_20ms_frame(
x_buffer2 + i_subframe * NOLACE_AF1_OUT_CHANNELS * NOLACE_FRAME_SIZE + NOLACE_FRAME_SIZE,
x_buffer2 + i_subframe * NOLACE_AF1_OUT_CHANNELS * NOLACE_FRAME_SIZE + NOLACE_FRAME_SIZE,
feature_buffer + i_subframe * NOLACE_COND_DIM,
- &layers->nolace_tdshape1_alpha1,
+ &layers->nolace_tdshape1_alpha1_f,
+ &layers->nolace_tdshape1_alpha1_t,
&layers->nolace_tdshape1_alpha2,
NOLACE_TDSHAPE1_FEATURE_DIM,
NOLACE_TDSHAPE1_FRAME_SIZE,
@@ -688,7 +689,8 @@ static void nolace_process_20ms_frame(
x_buffer1 + i_subframe * NOLACE_AF2_OUT_CHANNELS * NOLACE_FRAME_SIZE + NOLACE_FRAME_SIZE,
x_buffer1 + i_subframe * NOLACE_AF2_OUT_CHANNELS * NOLACE_FRAME_SIZE + NOLACE_FRAME_SIZE,
feature_buffer + i_subframe * NOLACE_COND_DIM,
- &layers->nolace_tdshape2_alpha1,
+ &layers->nolace_tdshape2_alpha1_f,
+ &layers->nolace_tdshape2_alpha1_t,
&layers->nolace_tdshape2_alpha2,
NOLACE_TDSHAPE2_FEATURE_DIM,
NOLACE_TDSHAPE2_FRAME_SIZE,
@@ -739,7 +741,8 @@ static void nolace_process_20ms_frame(
x_buffer2 + i_subframe * NOLACE_AF3_OUT_CHANNELS * NOLACE_FRAME_SIZE + NOLACE_FRAME_SIZE,
x_buffer2 + i_subframe * NOLACE_AF3_OUT_CHANNELS * NOLACE_FRAME_SIZE + NOLACE_FRAME_SIZE,
feature_buffer + i_subframe * NOLACE_COND_DIM,
- &layers->nolace_tdshape3_alpha1,
+ &layers->nolace_tdshape3_alpha1_f,
+ &layers->nolace_tdshape3_alpha1_t,
&layers->nolace_tdshape3_alpha2,
NOLACE_TDSHAPE3_FEATURE_DIM,
NOLACE_TDSHAPE3_FRAME_SIZE,
@@ -827,7 +830,7 @@ int osce_load_models(OSCEModel *model, const unsigned char *data, int len)
if (ret == 0) {ret = init_lace(&model->lace, list);}
#endif
-#ifndef DISABLE_LACE
+#ifndef DISABLE_NOLACE
if (ret == 0) {ret = init_nolace(&model->nolace, list);}
#endif
@@ -841,7 +844,7 @@ int osce_load_models(OSCEModel *model, const unsigned char *data, int len)
if (ret == 0) {ret = init_lace(&model->lace, lacelayers_arrays);}
#endif
-#ifndef DISABLE_LACE
+#ifndef DISABLE_NOLACE
if (ret == 0) {ret = init_nolace(&model->nolace, nolacelayers_arrays);}
#endif
diff --git a/dnn/torch/osce/export_model_weights.py b/dnn/torch/osce/export_model_weights.py
index da90a39a..3aac3a0c 100644
--- a/dnn/torch/osce/export_model_weights.py
+++ b/dnn/torch/osce/export_model_weights.py
@@ -60,13 +60,13 @@ schedules = {
('feature_net.conv1', dict()),
('feature_net.conv2', dict(quantize=True, scale=None)),
('feature_net.tconv', dict(quantize=True, scale=None)),
- ('feature_net.gru', dict()),
+ ('feature_net.gru', dict(quantize=True, scale=None, recurrent_scale=None)),
('cf1', dict(quantize=True, scale=None)),
('cf2', dict(quantize=True, scale=None)),
('af1', dict(quantize=True, scale=None)),
- ('tdshape1', dict()),
- ('tdshape2', dict()),
- ('tdshape3', dict()),
+ ('tdshape1', dict(quantize=True, scale=None)),
+ ('tdshape2', dict(quantize=True, scale=None)),
+ ('tdshape3', dict(quantize=True, scale=None)),
('af2', dict(quantize=True, scale=None)),
('af3', dict(quantize=True, scale=None)),
('af4', dict(quantize=True, scale=None)),
@@ -81,7 +81,7 @@ schedules = {
('feature_net.conv1', dict()),
('feature_net.conv2', dict(quantize=True, scale=None)),
('feature_net.tconv', dict(quantize=True, scale=None)),
- ('feature_net.gru', dict()),
+ ('feature_net.gru', dict(quantize=True, scale=None, recurrent_scale=None)),
('cf1', dict(quantize=True, scale=None)),
('cf2', dict(quantize=True, scale=None)),
('af1', dict(quantize=True, scale=None))