Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.xiph.org/xiph/opus.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJean-Marc Valin <jmvalin@amazon.com>2023-07-23 21:21:21 +0300
committerJean-Marc Valin <jmvalin@amazon.com>2023-07-23 21:49:13 +0300
commit9d40e5cb0813464a9c6089210cbb72d99b94f253 (patch)
tree3c90fbbedcd460904579322c49d48ff3663590f7
parent587c1020feea25920851e984f7e2aef784263a57 (diff)
Add loading for LinearLayer
Untested
-rw-r--r--dnn/nnet.h11
-rw-r--r--dnn/parse_lpcnet_weights.c42
2 files changed, 53 insertions, 0 deletions
diff --git a/dnn/nnet.h b/dnn/nnet.h
index 92a36ec2..a8746c16 100644
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -158,6 +158,17 @@ extern const WeightArray lpcnet_plc_arrays[];
extern const WeightArray rdovae_enc_arrays[];
extern const WeightArray rdovae_dec_arrays[];
+int linear_init(LinearLayer *layer, const WeightArray *arrays, /* Resolve named arrays from `arrays` into `layer`; returns 0 on success, 1 on failure */
+ const char *bias, /* name of bias vector */
+ const char *subias, /* name of subtracted-bias vector */
+ const char *weights, /* name of quantized weights, or NULL */
+ const char *float_weights, /* name of float weights, or NULL */
+ const char *weights_idx, /* name of sparse block index, or NULL for a dense layer */
+ const char *diag, /* name of diagonal term */
+ const char *scale, /* name of per-output scale */
+ int nb_inputs, /* input dimension */
+ int nb_outputs); /* output dimension */
+
int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
diff --git a/dnn/parse_lpcnet_weights.c b/dnn/parse_lpcnet_weights.c
index 493ecb0a..833f972f 100644
--- a/dnn/parse_lpcnet_weights.c
+++ b/dnn/parse_lpcnet_weights.c
@@ -113,6 +113,48 @@ static const void *find_idx_check(const WeightArray *arrays, const char *name, i
return a->data;
}
+int linear_init(LinearLayer *layer, const WeightArray *arrays, /* Look up each named array in `arrays` and wire it into `layer`; returns 0 on success, 1 if any required array is missing or mis-sized */
+ const char *bias, /* name of bias vector (nb_outputs entries) */
+ const char *subias, /* name of subtracted-bias vector (nb_outputs entries) */
+ const char *weights, /* name of quantized weight matrix, or NULL to skip */
+ const char *float_weights, /* name of float weight matrix, or NULL to skip */
+ const char *weights_idx, /* name of sparse block index, or NULL for a dense layer */
+ const char *diag, /* name of diagonal term (nb_outputs entries) */
+ const char *scale, /* name of per-output scale (nb_outputs entries) */
+ int nb_inputs, /* input dimension */
+ int nb_outputs) /* output dimension */
+{
+ int total_blocks; /* sparse block count; set by find_idx_check() only when weights_idx is given */
+ if ((layer->bias = find_array_check(arrays, bias, nb_outputs*sizeof(layer->bias[0]))) == NULL) return 1; /* bias is mandatory -- no NULL-name guard */
+ if ((layer->subias = find_array_check(arrays, subias, nb_outputs*sizeof(layer->subias[0]))) == NULL) return 1; /* subias is mandatory */
+ layer->weights = NULL; /* default: no quantized weights */
+ layer->float_weights = NULL; /* default: no float weights */
+ layer->weights_idx = NULL; /* default: dense layer */
+ if (weights_idx != NULL) { /* sparse layer: fetch the block index and learn the block count */
+ if ((layer->weights_idx = find_idx_check(arrays, weights_idx, nb_outputs, nb_inputs, &total_blocks)) == NULL) return 1;
+ }
+ if (weights_idx != NULL) { /* sparse: weight storage is SPARSE_BLOCK_SIZE entries per block */
+ if (weights != NULL) {
+ if ((layer->weights = find_array_check(arrays, weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->weights[0]))) == NULL) return 1;
+ }
+ if (float_weights != NULL) {
+ if ((layer->float_weights = find_array_check(arrays, float_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->float_weights[0]))) == NULL) return 1;
+ }
+ } else { /* dense: full nb_inputs x nb_outputs matrix */
+ if (weights != NULL) {
+ if ((layer->weights = find_array_check(arrays, weights, nb_inputs*nb_outputs*sizeof(layer->weights[0]))) == NULL) return 1;
+ }
+ if (float_weights != NULL) {
+ if ((layer->float_weights = find_array_check(arrays, float_weights, nb_inputs*nb_outputs*sizeof(layer->float_weights[0]))) == NULL) return 1;
+ }
+ } /* NOTE(review): unlike weights/float_weights, diag and scale below have no NULL-name option -- presumably every caller supplies them; confirm, since the commit is marked "Untested" */
+ if ((layer->diag = find_array_check(arrays, diag, nb_outputs*sizeof(layer->diag[0]))) == NULL) return 1; /* diag is mandatory */
+ if ((layer->scale = find_array_check(arrays, scale, nb_outputs*sizeof(layer->scale[0]))) == NULL) return 1; /* scale is mandatory */
+ layer->nb_inputs = nb_inputs; /* record dimensions for the compute path */
+ layer->nb_outputs = nb_outputs;
+ return 0; /* success */
+}
+
int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,