gitlab.xiph.org/xiph/opus.git
author     Jean-Marc Valin <jmvalin@amazon.com>   2023-07-17 23:11:33 +0300
committer  Jean-Marc Valin <jmvalin@amazon.com>   2023-07-20 08:01:29 +0300
commit     8423ef1de25d8006be837504c22080e0319a1d26 (patch)
tree       704453280e73c04b8f2a300c9237d120ce59bdfd
parent     bbb4cf554c3a892a9245dd684b225d1ff1651da7 (diff)
Remove unused code
-rw-r--r--  dnn/nnet.c | 166
-rw-r--r--  dnn/nnet.h |   6
2 files changed, 0 insertions, 172 deletions
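For reference before reading the diff: every compute_gru* variant deleted below implements the same standard GRU cell, in the "reset-after" form that compute_gru2() and compute_gru3() assert via celt_assert(gru->reset_after). Writing x for the input, h for the state, W_* and U_* for the input and recurrent weight blocks, and b_* and c_* for the input-side and recurrent-side biases (symbol names are ours, not the code's), the update the removed code computes is:

    z = \sigma(W_z x + b_z + U_z h + c_z)
    r = \sigma(W_r x + b_r + U_r h + c_r)
    \tilde{h} = \tanh(W_h x + b_h + r \odot (U_h h + c_h))
    h \leftarrow z \odot h + (1 - z) \odot \tilde{h}

Here \sigma is the logistic sigmoid, \odot is element-wise multiplication, and \tanh stands in for gru->activation, which these layers set to ACTIVATION_TANH.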
diff --git a/dnn/nnet.c b/dnn/nnet.c
index e9b8a28a..6269f028 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -134,31 +134,6 @@ void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *
compute_activation(output, output, N, layer->activation);
}
-void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
-{
- int i, c;
- int N, M, C;
- int stride;
- float tmp[MAX_MDENSE_TMP];
- celt_assert(input != output);
- M = layer->nb_inputs;
- N = layer->nb_neurons;
- C = layer->nb_channels;
- celt_assert(N*C <= MAX_MDENSE_TMP);
- stride = N*C;
- for (i=0;i<N*C;i++)
- tmp[i] = layer->bias[i];
- sgemv_accum(tmp, layer->input_weights, N*C, M, stride, input);
- compute_activation(tmp, tmp, N*C, ACTIVATION_TANH);
- for (i=0;i<N;i++)
- output[i] = 0;
- for (c=0;c<C;c++)
- {
- for (i=0;i<N;i++)
- output[i] += tmp[c*N + i]*layer->factor[c*N + i];
- }
- compute_activation(output, output, N, layer->activation);
-}
int sample_mdense(const MDenseLayer *layer, const float *input, const float *sampling_logit_table, kiss99_ctx *rng)
{
@@ -214,113 +189,6 @@ int sample_mdense(const MDenseLayer *layer, const float *input, const float *sam
}
-#if 0
-void compute_gru(const GRULayer *gru, float *state, const float *input)
-{
- int i;
- int N, M;
- int stride;
- float tmp[MAX_RNN_NEURONS];
- float z[MAX_RNN_NEURONS];
- float r[MAX_RNN_NEURONS];
- float h[MAX_RNN_NEURONS];
- celt_assert(gru->nb_neurons <= MAX_RNN_NEURONS);
- celt_assert(input != state);
- M = gru->nb_inputs;
- N = gru->nb_neurons;
- stride = 3*N;
- /* Compute update gate. */
- for (i=0;i<N;i++)
- z[i] = gru->bias[i];
- if (gru->reset_after)
- {
- for (i=0;i<N;i++)
- z[i] += gru->bias[3*N + i];
- }
- sgemv_accum(z, gru->input_weights, N, M, stride, input);
- sgemv_accum(z, gru->recurrent_weights, N, N, stride, state);
- compute_activation(z, z, N, ACTIVATION_SIGMOID);
-
- /* Compute reset gate. */
- for (i=0;i<N;i++)
- r[i] = gru->bias[N + i];
- if (gru->reset_after)
- {
- for (i=0;i<N;i++)
- r[i] += gru->bias[4*N + i];
- }
- sgemv_accum(r, &gru->input_weights[N], N, M, stride, input);
- sgemv_accum(r, &gru->recurrent_weights[N], N, N, stride, state);
- compute_activation(r, r, N, ACTIVATION_SIGMOID);
-
- /* Compute output. */
- for (i=0;i<N;i++)
- h[i] = gru->bias[2*N + i];
- if (gru->reset_after)
- {
- for (i=0;i<N;i++)
- tmp[i] = gru->bias[5*N + i];
- sgemv_accum(tmp, &gru->recurrent_weights[2*N], N, N, stride, state);
- for (i=0;i<N;i++)
- h[i] += tmp[i] * r[i];
- sgemv_accum(h, &gru->input_weights[2*N], N, M, stride, input);
- } else {
- for (i=0;i<N;i++)
- tmp[i] = state[i] * r[i];
- sgemv_accum(h, &gru->input_weights[2*N], N, M, stride, input);
- sgemv_accum(h, &gru->recurrent_weights[2*N], N, N, stride, tmp);
- }
- compute_activation(h, h, N, gru->activation);
- for (i=0;i<N;i++)
- h[i] = z[i]*state[i] + (1-z[i])*h[i];
- for (i=0;i<N;i++)
- state[i] = h[i];
-}
-#endif
-
-void compute_gru2(const GRULayer *gru, float *state, const float *input)
-{
- int i;
- int N, M;
- int stride;
- float zrh[3*MAX_RNN_NEURONS];
- float recur[3*MAX_RNN_NEURONS];
- float *z;
- float *r;
- float *h;
- M = gru->nb_inputs;
- N = gru->nb_neurons;
- z = zrh;
- r = &zrh[N];
- h = &zrh[2*N];
- celt_assert(gru->nb_neurons <= MAX_RNN_NEURONS);
- celt_assert(input != state);
- celt_assert(gru->reset_after);
- stride = 3*N;
- /* Compute update gate. */
-#ifdef USE_SU_BIAS
- for (i=0;i<3*N;i++)
- zrh[i] = gru->subias[i];
-#else
- for (i=0;i<3*N;i++)
- zrh[i] = gru->bias[i];
-#endif
- sgemv_accum8x4(zrh, gru->input_weights, 3*N, M, stride, input);
- for (i=0;i<3*N;i++)
- recur[i] = gru->bias[3*N + i];
- sgemv_accum8x4(recur, gru->recurrent_weights, 3*N, N, stride, state);
- for (i=0;i<2*N;i++)
- zrh[i] += recur[i];
- compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID);
- for (i=0;i<N;i++)
- h[i] += recur[2*N+i]*r[i];
- compute_activation(h, h, N, gru->activation);
- for (i=0;i<N;i++)
- h[i] = z[i]*state[i] + (1-z[i])*h[i];
- for (i=0;i<N;i++)
- state[i] = h[i];
-}
-
#define MAX_RNN_NEURONS_ALL IMAX(IMAX(MAX_RNN_NEURONS, PLC_MAX_RNN_NEURONS), DRED_MAX_RNN_NEURONS)
void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input)
@@ -372,40 +240,6 @@ void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *stat
}
-void compute_gru3(const GRULayer *gru, float *state, const float *input)
-{
- int i;
- int N;
- int stride;
- float zrh[3*MAX_RNN_NEURONS_ALL];
- float recur[3*MAX_RNN_NEURONS_ALL];
- float *z;
- float *r;
- float *h;
- N = gru->nb_neurons;
- z = zrh;
- r = &zrh[N];
- h = &zrh[2*N];
- celt_assert(gru->nb_neurons <= MAX_RNN_NEURONS_ALL);
- celt_assert(input != state);
- celt_assert(gru->reset_after);
- stride = 3*N;
- OPUS_COPY(zrh, input, 3*N);
- for (i=0;i<3*N;i++)
- recur[i] = gru->bias[3*N + i];
- sgemv_accum8x4(recur, gru->recurrent_weights, 3*N, N, stride, state);
- for (i=0;i<2*N;i++)
- zrh[i] += recur[i];
- compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID);
- for (i=0;i<N;i++)
- h[i] += recur[2*N+i]*r[i];
- compute_activation(h, h, N, gru->activation);
- for (i=0;i<N;i++)
- h[i] = z[i]*state[i] + (1-z[i])*h[i];
- for (i=0;i<N;i++)
- state[i] = h[i];
-}
-
/* The input of this GRU is after the input matrix multiply. */
void compute_sparse_gru(const SparseGRULayer *gru, float *state, const float *input)
{
diff --git a/dnn/nnet.h b/dnn/nnet.h
index a5fc79e1..f5c0777e 100644
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -126,14 +126,8 @@ void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
int sample_mdense(const MDenseLayer *layer, const float *input, const float *sampling_logit_table, kiss99_ctx *rng);
-void compute_gru(const GRULayer *gru, float *state, const float *input);
-
-void compute_gru2(const GRULayer *gru, float *state, const float *input);
-
void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input);
-void compute_gru3(const GRULayer *gru, float *state, const float *input);
-
void compute_sparse_gru(const SparseGRULayer *gru, float *state, const float *input);
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input);