gitlab.xiph.org/xiph/opus.git

author    Jan Buethe <jbuethe@amazon.de>        2023-12-19 12:45:59 +0300
committer Jean-Marc Valin <jmvalin@amazon.com>  2023-12-20 07:01:37 +0300
commit    49fc038c7648c5683c1a26b6061e5b5f86bf1ab1 (patch)
tree      ec5e7d9498c2f91e6504d14ecac68cc1ba17267f
parent    96ff8d66eb2d4d2a16e3c28ec09dd2a82e75511c (diff)
fixed length issue for LACE and NoLACE weight names
-rw-r--r--  dnn/osce.c                              | 77
-rw-r--r--  dnn/osce_structs.h                      |  4
-rw-r--r--  dnn/parse_lpcnet_weights.c              |  3
-rw-r--r--  dnn/torch/osce/export_model_weights.py  |  6
-rw-r--r--  dnn/write_lpcnet_weights.c              |  2
5 files changed, 77 insertions(+), 15 deletions(-)
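
The rename replaces the "feature_net" name component (11 characters) with "fnet" (4), so every weight-array name derived from these layers shrinks by seven characters and fits the fixed-size name field that dnn/write_lpcnet_weights.c now asserts on (see the last file below). A minimal sketch of the effect, using the longest base layer name touched by this diff:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Longest base layer name in this diff, before and after the
     * rename; every weight-array name the exporter derives from it
     * shrinks by the same seven characters. */
    const char *before = "nolace_feature_net_gru_recurrent";
    const char *after  = "nolace_fnet_gru_recurrent";

    printf("before: %zu chars (%s)\n", strlen(before), before); /* 32 */
    printf("after:  %zu chars (%s)\n", strlen(after),  after);  /* 25 */
    return 0;
}
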
diff --git a/dnn/osce.c b/dnn/osce.c
index 8abff88b..5cf0c936 100644
--- a/dnn/osce.c
+++ b/dnn/osce.c
@@ -128,7 +128,7 @@ static void lace_feature_net(
OPUS_COPY(input_buffer + LACE_NUM_FEATURES + LACE_PITCH_EMBEDDING_DIM, numbits_embedded, 2 * LACE_NUMBITS_EMBEDDING_DIM);
compute_generic_conv1d(
- &hLACE->layers.lace_feature_net_conv1,
+ &hLACE->layers.lace_fnet_conv1,
output_buffer + i_subframe * LACE_HIDDEN_FEATURE_DIM,
NULL,
input_buffer,
@@ -140,7 +140,7 @@ static void lace_feature_net(
/* subframe accumulation */
OPUS_COPY(input_buffer, output_buffer, 4 * LACE_HIDDEN_FEATURE_DIM);
compute_generic_conv1d(
- &hLACE->layers.lace_feature_net_conv2,
+ &hLACE->layers.lace_fnet_conv2,
output_buffer,
state->feature_net_conv2_state,
input_buffer,
@@ -152,7 +152,7 @@ static void lace_feature_net(
/* tconv upsampling */
OPUS_COPY(input_buffer, output_buffer, 4 * LACE_COND_DIM);
compute_generic_dense(
- &hLACE->layers.lace_feature_net_tconv,
+ &hLACE->layers.lace_fnet_tconv,
output_buffer,
input_buffer,
ACTIVATION_LINEAR,
@@ -164,8 +164,8 @@ static void lace_feature_net(
for (i_subframe = 0; i_subframe < 4; i_subframe++)
{
compute_generic_gru(
- &hLACE->layers.lace_feature_net_gru_input,
- &hLACE->layers.lace_feature_net_gru_recurrent,
+ &hLACE->layers.lace_fnet_gru_input,
+ &hLACE->layers.lace_fnet_gru_recurrent,
state->feature_net_gru_state,
input_buffer + i_subframe * LACE_COND_DIM,
arch
@@ -399,7 +399,7 @@ static void nolace_feature_net(
OPUS_COPY(input_buffer + NOLACE_NUM_FEATURES + NOLACE_PITCH_EMBEDDING_DIM, numbits_embedded, 2 * NOLACE_NUMBITS_EMBEDDING_DIM);
compute_generic_conv1d(
- &hNoLACE->layers.nolace_feature_net_conv1,
+ &hNoLACE->layers.nolace_fnet_conv1,
output_buffer + i_subframe * NOLACE_HIDDEN_FEATURE_DIM,
NULL,
input_buffer,
@@ -411,7 +411,7 @@ static void nolace_feature_net(
/* subframe accumulation */
OPUS_COPY(input_buffer, output_buffer, 4 * NOLACE_HIDDEN_FEATURE_DIM);
compute_generic_conv1d(
- &hNoLACE->layers.nolace_feature_net_conv2,
+ &hNoLACE->layers.nolace_fnet_conv2,
output_buffer,
state->feature_net_conv2_state,
input_buffer,
@@ -423,7 +423,7 @@ static void nolace_feature_net(
/* tconv upsampling */
OPUS_COPY(input_buffer, output_buffer, 4 * NOLACE_COND_DIM);
compute_generic_dense(
- &hNoLACE->layers.nolace_feature_net_tconv,
+ &hNoLACE->layers.nolace_fnet_tconv,
output_buffer,
input_buffer,
ACTIVATION_LINEAR,
@@ -435,8 +435,8 @@ static void nolace_feature_net(
for (i_subframe = 0; i_subframe < 4; i_subframe++)
{
compute_generic_gru(
- &hNoLACE->layers.nolace_feature_net_gru_input,
- &hNoLACE->layers.nolace_feature_net_gru_recurrent,
+ &hNoLACE->layers.nolace_fnet_gru_input,
+ &hNoLACE->layers.nolace_fnet_gru_recurrent,
state->feature_net_gru_state,
input_buffer + i_subframe * NOLACE_COND_DIM,
arch
@@ -813,6 +813,63 @@ void osce_reset(silk_OSCE_struct *hOSCE, int method)
}
+#if 0
+#include <stdio.h>
+static void print_float_array(FILE *fid, const char *name, const float *array, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ fprintf(fid, "%s[%d]: %f\n", name, i, array[i]);
+ }
+}
+
+static void print_int_array(FILE *fid, const char *name, const int *array, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ fprintf(fid, "%s[%d]: %d\n", name, i, array[i]);
+ }
+}
+
+static void print_int8_array(FILE *fid, const char *name, const opus_int8 *array, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ fprintf(fid, "%s[%d]: %d\n", name, i, array[i]);
+ }
+}
+
+static void print_linear_layer(FILE *fid, const char *name, LinearLayer *layer)
+{
+ int i, n_in, n_out, n_total;
+ char tmp[256];
+
+ n_in = layer->nb_inputs;
+ n_out = layer->nb_outputs;
+ n_total = n_in * n_out;
+
+ fprintf(fid, "\nprinting layer %s...\n", name);
+ fprintf(fid, "%s.nb_inputs: %d\n%s.nb_outputs: %d\n", name, n_in, name, n_out);
+
+ if (layer->bias !=NULL){}
+ if (layer->subias !=NULL){}
+ if (layer->weights !=NULL){}
+ if (layer->float_weights !=NULL){}
+
+ if (layer->bias != NULL) {sprintf(tmp, "%s.bias", name); print_float_array(fid, tmp, layer->bias, n_out);}
+ if (layer->subias != NULL) {sprintf(tmp, "%s.subias", name); print_float_array(fid, tmp, layer->subias, n_out);}
+ if (layer->weights != NULL) {sprintf(tmp, "%s.weights", name); print_int8_array(fid, tmp, layer->weights, n_total);}
+ if (layer->float_weights != NULL) {sprintf(tmp, "%s.float_weights", name); print_float_array(fid, tmp, layer->float_weights, n_total);}
+ //if (layer->weights_idx != NULL) {sprintf(tmp, "%s.weights_idx", name); print_float_array(fid, tmp, layer->weights_idx, n_total);}
+ if (layer->diag != NULL) {sprintf(tmp, "%s.diag", name); print_float_array(fid, tmp, layer->diag, n_in);}
+ if (layer->scale != NULL) {sprintf(tmp, "%s.scale", name); print_float_array(fid, tmp, layer->scale, n_out);}
+
+}
+#endif
+
int osce_load_models(OSCEModel *model, const unsigned char *data, int len)
{
int ret = 0;
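
The print_linear_layer helper added above is compiled out with #if 0; it is a debugging aid for comparing a loaded layer against the exporter output. A minimal usage sketch, assuming the block is switched to #if 1 and that hLACE has the LACE handle type used by lace_feature_net() above (the output path is illustrative):

#include <stdio.h>

static void dump_lace_fnet_conv1(LACE *hLACE)
{
    FILE *fid = fopen("lace_fnet_conv1.txt", "w"); /* illustrative path */
    if (fid == NULL) return;
    /* Writes dims, bias, subias, weights, diag and scale, as the
     * helper above implements. */
    print_linear_layer(fid, "lace_fnet_conv1", &hLACE->layers.lace_fnet_conv1);
    fclose(fid);
}
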
diff --git a/dnn/osce_structs.h b/dnn/osce_structs.h
index f6b67c35..a4350be2 100644
--- a/dnn/osce_structs.h
+++ b/dnn/osce_structs.h
@@ -54,7 +54,7 @@ typedef struct {
#ifndef DISABLE_LACE
/* LACE */
typedef struct {
- float feature_net_conv2_state[LACE_FEATURE_NET_CONV2_STATE_SIZE];
+ float feature_net_conv2_state[LACE_FNET_CONV2_STATE_SIZE];
float feature_net_gru_state[LACE_COND_DIM];
AdaCombState cf1_state;
AdaCombState cf2_state;
@@ -75,7 +75,7 @@ typedef struct
#ifndef DISABLE_NOLACE
/* NoLACE */
typedef struct {
- float feature_net_conv2_state[NOLACE_FEATURE_NET_CONV2_STATE_SIZE];
+ float feature_net_conv2_state[NOLACE_FNET_CONV2_STATE_SIZE];
float feature_net_gru_state[NOLACE_COND_DIM];
float post_cf1_state[NOLACE_COND_DIM];
float post_cf2_state[NOLACE_COND_DIM];
diff --git a/dnn/parse_lpcnet_weights.c b/dnn/parse_lpcnet_weights.c
index c2108593..71f263e3 100644
--- a/dnn/parse_lpcnet_weights.c
+++ b/dnn/parse_lpcnet_weights.c
@@ -51,7 +51,7 @@ int parse_record(const unsigned char **data, int *len, WeightArray *array) {
*len -= h->block_size+WEIGHT_BLOCK_SIZE;
return array->size;
}
-
+#include <stdio.h>
int parse_weights(WeightArray **list, const unsigned char *data, int len)
{
int nb_arrays=0;
@@ -68,6 +68,7 @@ int parse_weights(WeightArray **list, const unsigned char *data, int len)
*list = opus_realloc(*list, capacity*sizeof(WeightArray));
}
(*list)[nb_arrays++] = array;
+ printf("loading weight %s\n", array.name);
} else {
opus_free(*list);
*list = NULL;
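
The printf added here traces each array as it is parsed, which makes a truncated or mismatched name visible immediately. A minimal sketch of driving it, assuming a weight blob already in memory and that parse_weights returns the number of arrays parsed and NULL-terminates the list (write_weights below relies on that terminator):

/* Illustration only: parse a blob and list its arrays. */
static void list_weight_arrays(const unsigned char *data, int len)
{
    WeightArray *list = NULL;
    int i, nb;

    nb = parse_weights(&list, data, len); /* prints "loading weight ..." per record */
    for (i = 0; i < nb; i++)
        printf("  %-48s %d bytes\n", list[i].name, list[i].size);
    opus_free(list);
}
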
diff --git a/dnn/torch/osce/export_model_weights.py b/dnn/torch/osce/export_model_weights.py
index da90a39a..a16c6d59 100644
--- a/dnn/torch/osce/export_model_weights.py
+++ b/dnn/torch/osce/export_model_weights.py
@@ -111,11 +111,13 @@ def osce_dump_generic(writer, name, module):
dump_torch_weights(writer, module, name=name, verbose=True)
else:
for child_name, child in module.named_children():
- osce_dump_generic(writer, name + "_" + child_name, child)
+ osce_dump_generic(writer, (name + "_" + child_name).replace("feature_net", "fnet"), child)
def export_name(name):
- return name.replace('.', '_')
+ name = name.replace('.', '_')
+ name = name.replace('feature_net', 'fnet')
+ return name
def osce_scheduled_dump(writer, prefix, model, schedule):
if not prefix.endswith('_'):
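
For readers on the C side, here is an equivalent rendition of the transformation export_name now performs ('.' becomes '_', then "feature_net" becomes "fnet"). Illustration only; the authoritative logic is the Python above:

#include <stdio.h>
#include <string.h>

static void export_name_c(const char *in, char *out, size_t out_size)
{
    size_t o = 0;
    while (*in != '\0' && o + 1 < out_size) {
        if (strncmp(in, "feature_net", 11) == 0 && o + 5 < out_size) {
            memcpy(out + o, "fnet", 4); /* shorten the component */
            o += 4;
            in += 11;
        } else {
            out[o++] = (*in == '.') ? '_' : *in; /* dots to underscores */
            in++;
        }
    }
    out[o] = '\0';
}

int main(void)
{
    char buf[64];
    export_name_c("lace.feature_net.gru", buf, sizeof(buf));
    printf("%s\n", buf); /* prints "lace_fnet_gru" */
    return 0;
}
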
diff --git a/dnn/write_lpcnet_weights.c b/dnn/write_lpcnet_weights.c
index da114213..c39ce87f 100644
--- a/dnn/write_lpcnet_weights.c
+++ b/dnn/write_lpcnet_weights.c
@@ -56,7 +56,9 @@ void write_weights(const WeightArray *list, FILE *fout)
int i=0;
unsigned char zeros[WEIGHT_BLOCK_SIZE] = {0};
while (list[i].name != NULL) {
+ printf("writing weight %s\n", list[i].name);
WeightHead h;
+ celt_assert(strlen(list[i].name) < sizeof(h.name) - 1 && "name too long in write_weights");
memcpy(h.head, "DNNw", 4);
h.version = WEIGHT_BLOB_VERSION;
h.type = list[i].type;
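
The new celt_assert bounds each name against the header's fixed-size name field, which is what made the long "feature_net" names a problem in the first place. For reference, a sketch of the record-header layout; the field sizes are my recollection of dnn/nnet.h and are an assumption to be checked against the tree:

/* Sketch (assumed sizes): 4+4+4+4+4+44 = 64 bytes = WEIGHT_BLOCK_SIZE. */
typedef struct {
    char head[4];    /* magic "DNNw" */
    int version;     /* WEIGHT_BLOB_VERSION */
    int type;        /* element type of the array */
    int size;        /* payload size in bytes */
    int block_size;  /* payload padded to the block size */
    char name[44];   /* NUL-terminated array name; the assert above
                        then requires strlen(name) <= 42 */
} WeightHeadSketch;
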