lossgen.c « dnn - gitlab.xiph.org/xiph/opus.git

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <math.h>
#include <stdlib.h>  /* rand(), plus atof()/atoi() in the test harness below */
#include "lossgen.h"
#include "os_support.h"
#include "nnet.h"

/* Disable RTCD (run-time CPU detection) for this file: pinning RTCD_ARCH to
   "c" makes the kernels from nnet_arch.h compile as their plain-C versions. */
#define RTCD_ARCH c

#include "nnet_arch.h"

/* Upper bound on the GRU state sizes, used to size the temporary stack
   buffers in compute_generic_gru_lossgen() below. */
#define MAX_RNN_NEURONS_ALL IMAX(LOSSGEN_GRU1_STATE_SIZE, LOSSGEN_GRU2_STATE_SIZE)

/* These two functions are copied from nnet.c to make sure we don't have linking issues. */
void compute_generic_gru_lossgen(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch)
{
  int i;
  int N;
  float zrh[3*MAX_RNN_NEURONS_ALL];
  float recur[3*MAX_RNN_NEURONS_ALL];
  float *z;
  float *r;
  float *h;
  celt_assert(3*recurrent_weights->nb_inputs == recurrent_weights->nb_outputs);
  celt_assert(input_weights->nb_outputs == recurrent_weights->nb_outputs);
  N = recurrent_weights->nb_inputs;
  z = zrh;
  r = &zrh[N];
  h = &zrh[2*N];
  celt_assert(recurrent_weights->nb_outputs <= 3*MAX_RNN_NEURONS_ALL);
  celt_assert(in != state);
  compute_linear(input_weights, zrh, in, arch);
  compute_linear(recurrent_weights, recur, state, arch);
  for (i=0;i<2*N;i++)
     zrh[i] += recur[i];
  compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID, arch);
  for (i=0;i<N;i++)
     h[i] += recur[2*N+i]*r[i];
  compute_activation(h, h, N, ACTIVATION_TANH, arch);
  for (i=0;i<N;i++)
     h[i] = z[i]*state[i] + (1-z[i])*h[i];
  for (i=0;i<N;i++)
     state[i] = h[i];
}
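
/* For reference, the update computed above is a standard GRU cell with the
   reset gate applied to the recurrent contribution. In the code's terms
   (W* = input weights, U* = recurrent weights, s = state, x = input; bias
   terms omitted for brevity):

     z = sigmoid(Wz*x + Uz*s)        (update gate)
     r = sigmoid(Wr*x + Ur*s)        (reset gate)
     h = tanh(Wh*x + r .* (Uh*s))    (candidate state)
     s <- z .* s + (1 - z) .* h

   Note the convention: z weights the *old* state, so z near 1 means "keep
   the previous state". */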


void compute_generic_dense_lossgen(const LinearLayer *layer, float *output, const float *input, int activation, int arch)
{
   compute_linear(layer, output, input, arch);
   compute_activation(output, output, layer->nb_outputs, activation, arch);
}
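
/* Equivalently: output = activation(W*input + b), with the weight matrix and
   bias taken from `layer`. */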


int sample_loss(
    LossGenState *st,
    float percent_loss)
{
  float input[2];
  float tmp[LOSSGEN_DENSE_IN_OUT_SIZE];
  float out;
  int loss;
  LossGen *model = &st->model;
  input[0] = st->last_loss;
  input[1] = percent_loss;
  compute_generic_dense_lossgen(&model->lossgen_dense_in, tmp, input, ACTIVATION_TANH, 0);
  compute_generic_gru_lossgen(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, tmp, 0);
  compute_generic_gru_lossgen(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, 0);
  compute_generic_dense_lossgen(&model->lossgen_dense_out, &out, st->gru2_state, ACTIVATION_SIGMOID, 0);
  /* Bernoulli draw: the sigmoid output is the probability that the next packet is lost. */
  loss = (float)rand()/RAND_MAX < out;
  st->last_loss = loss;
  return loss;
}
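
/* Note that the generator is autoregressive: each decision is fed back in
   through last_loss and the two GRU states, so successive losses come out
   correlated in time (bursty) rather than i.i.d. at the requested rate. */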


void lossgen_init(LossGenState *st)
{
  int ret;
  OPUS_CLEAR(st, 1);
#ifndef USE_WEIGHTS_FILE
  ret = init_lossgen(&st->model, lossgen_arrays);
#else
  ret = 0;
#endif
  celt_assert(ret == 0);
  (void)ret;
}

int lossgen_load_model(LossGenState *st, const unsigned char *data, int len) {
  WeightArray *list;
  int ret;
  parse_weights(&list, data, len);
  ret = init_lossgen(&st->model, list);
  opus_free(list);
  if (ret == 0) return 0;
  else return -1;
}
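
/* A sketch of the external-weights path (hypothetical caller; assumes `data`
   and `len` hold a weight blob in the format parse_weights() expects):

     LossGenState st;
     lossgen_init(&st);
     if (lossgen_load_model(&st, data, len) != 0) {
        // the blob did not contain a usable lossgen model
     }
*/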

#if 0
#include <stdio.h>
int main(int argc, char **argv) {
  int i, N;
  float p;
  LossGenState st;
  if (argc!=3) {
    fprintf(stderr, "usage: lossgen <percentage> <length>\n");
    return 1;
  }
  lossgen_init(&st);
  p = atof(argv[1]);
  N = atoi(argv[2]);
  for (i=0;i<N;i++) {
    printf("%d\n", sample_loss(&st, p));
  }
  return 0;
}
#endif
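
/* Usage sketch for the standalone harness above (flip the "#if 0" to "#if 1"
   and build it along with the rest of the dnn code; exact build commands
   depend on the tree, so this is illustrative only):

     ./lossgen 0.25 1000 > trace.txt

   This prints 1000 lines, each 0 (received) or 1 (lost); argv[1] is passed
   to sample_loss() unmodified, so use whatever scale the model expects. */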