MBDyn-1.7.3
ann.c File Reference
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "ann.h"
Go to the source code of this file.

Functions

ann_res_t ANN_init (ANN *net, const char *FileName)
ann_res_t ANN_destroy (ANN *net)
ann_res_t ANN_write (ANN *net, FILE *fh, unsigned flags)
ann_res_t ANN_sim (ANN *net, vector *input, vector *output, unsigned flags)
ann_res_t ANN_DataRead (matrix *MAT, int *N_sample, char *FileName)
ann_res_t ANN_DataWrite (matrix *MAT, char *FileName)
double ANN_InternalFunction (double v, ANN *net)
double ANN_InternalFunctionDer (double v, ANN *net)
ann_res_t ANN_WeightUpdate (ANN *net, ANN_vector_matrix DW, double K)
ann_res_t ANN_vector_matrix_init (ANN_vector_matrix *vm, int *N_neuron, int N_layer)
ann_res_t ANN_vector_vector_init (ANN_vector_vector *vv, int *N_neuron, int N_layer)
ann_res_t ANN_dXdW (ANN *net, int I, int J, int N)
ann_res_t ANN_dEdW (ANN *net, vector *e)
ann_res_t ANN_TrainingEpoch (ANN *net, matrix *INPUT, matrix *DES_OUTPUT, matrix *NN_OUTPUT, int N_sample, ann_training_mode_t mode)
ann_res_t ANN_reset (ANN *net)
ann_res_t ANN_TotalError (matrix *DES_OUTPUT, matrix *NN_OUTPUT, double *err)
ann_res_t ANN_vector_matrix_ass (ANN_vector_matrix *vm1, ANN_vector_matrix *vm2, int *N_neuron, int N_layer, double K)
void ANN_error (ann_res_t error, char *string)
ann_res_t ANN_jacobian_matrix (ANN *net, matrix *jacobian)

Function Documentation

ann_res_t ANN_DataRead (matrix *MAT, int *N_sample, char *FileName)

Definition at line 475 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_NO_FILE, ANN_OK, matrix_init(), matrix_read(), and W_M_BIN.

Referenced by main().

475  {
476 
477  int Nrow, Ncolumn;
478  FILE *fh;
479 
480  if( !( fh = fopen( FileName, "r" ) ) ){
481  ANN_error( ANN_NO_FILE, "ANN_DataRead" );
482  return ANN_NO_FILE;
483  }
484 
485  fscanf( fh, "%d", &Nrow);
486  fscanf( fh, "%d", &Ncolumn);
487  if( matrix_init( MAT, Nrow, Ncolumn ) ){
488  ANN_error( ANN_MATRIX_ERROR, "ANN_DataRead" );
489  return ANN_MATRIX_ERROR;
490  }
491 
492  if( matrix_read( MAT, fh, W_M_BIN) ){
493  ANN_error( ANN_MATRIX_ERROR, "ANN_DataRead" );
494  return ANN_MATRIX_ERROR;
495  }
496 
497  fclose(fh);
498 
499  *N_sample = Nrow;
500 
501  return ANN_OK;
502 }
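
A minimal usage sketch (the file name "input.dat" is hypothetical; ANN_DataRead() reads the two matrix dimensions first and then the data through matrix_read() with W_M_BIN, and it reports failures itself through ANN_error()):

    matrix INPUT;
    int N_sample;

    if (ANN_DataRead(&INPUT, &N_sample, "input.dat") != ANN_OK) {
            /* the error has already been printed by ANN_error() */
            return 1;
    }
    /* INPUT now holds N_sample rows of data; release it with matrix_destroy(&INPUT) */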

ann_res_t ANN_DataWrite (matrix *MAT, char *FileName)

Definition at line 504 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_NO_FILE, ANN_OK, matrix_write(), matrix::Ncolumn, matrix::Nrow, and W_M_BIN.

Referenced by main().

504  {
505 
506  FILE *fh;
507 
508  if( !( fh = fopen( FileName, "w" ) ) ){
509  ANN_error( ANN_NO_FILE, "ANN_DataWrite" );
510  return ANN_NO_FILE;
511  }
512 
513  fprintf( fh, "%d %d", MAT->Nrow, MAT->Ncolumn );
514 
515  if( matrix_write(MAT, fh, W_M_BIN) ){
516  ANN_error( ANN_MATRIX_ERROR, "ANN_DataWrite" );
517  return ANN_MATRIX_ERROR;
518  }
519 
520  fclose(fh);
521 
522  return ANN_OK;
523 }
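
The symmetric call stores a previously filled matrix, writing its dimensions first and then the data through matrix_write(); "nn_output.dat" is a hypothetical file name:

    if (ANN_DataWrite(&NN_OUTPUT, "nn_output.dat") != ANN_OK) {
            /* the error has already been printed by ANN_error() */
    }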

ann_res_t ANN_dEdW (ANN *net, vector *e)

Definition at line 645 of file ann.c.

References ANN_dXdW(), ANN_error(), ANN_GEN_ERROR, ANN_InternalFunction(), ANN_InternalFunctionDer(), ANN_MATRIX_ERROR, ANN_OK, ANN_vector_matrix_ass(), ANN::dEdV, ANN::dEdW, ANN::dXdW, ANN::dy, ANN::dydV, ANN::dydW, ANN::eta, matrix::mat, matrix_vector_prod(), matrixT_vector_prod(), ANN::N_layer, ANN::N_neuron, ANN::N_output, ANN::r, ANN::rho, ANN::temp, ANN::v, vector::vec, ANN::W, and ANN::Y_neuron.

Referenced by ANN_TrainingEpoch().

645  {
646 
647  int i,j,k,p,l,q;
648  double temp;
649 
650  /* Output gradient ( visible layer )*/
651  for( k=0;k<net->N_neuron[net->N_layer];k++ ){
652  for( l=0;l<net->N_neuron[net->N_layer+1];l++ ){
653  if( ANN_dXdW( net , k , l , net->N_layer ) ){
654  ANN_error( ANN_GEN_ERROR, "ANN_dEdW" );
655  return ANN_GEN_ERROR;
656  }
657  if (matrixT_vector_prod( &net->W[net->N_layer], &net->dXdW[net->N_layer] ,&net->temp[net->N_layer+1] ) ){
658  ANN_error( ANN_MATRIX_ERROR, "ANN_dEdW" );
659  return ANN_MATRIX_ERROR;
660  }
661  for( j=0; j<net->N_neuron[net->N_layer+1]; j++ ){
662  net->dydW[j][net->N_layer].mat[k][l] = ANN_InternalFunctionDer(net->v[net->N_layer+1].vec[j], net)*( net->temp[net->N_layer+1].vec[j] + net->Y_neuron[net->N_layer].vec[k]*(l==j) );
663  }
664 
665  temp = 0.;
666  for( j=0; j<net->N_output; j++ ){
667  temp += -net->dydW[j][net->N_layer].mat[k][l]*e->vec[j];
668  }
669  net->dEdW[net->N_layer].mat[k][l] = net->rho*net->dEdW[net->N_layer].mat[k][l] - net->eta*temp;
670  }
671  }
672 
673  /* Output gradient (hidden layer) */
674  for( q=0; q<net->N_neuron[net->N_layer+1]; q++ ){
675  for( i=0; i<(net->N_neuron[net->N_layer+1] ); i++ ){
676  net->dydV[net->N_layer+1].vec[i] = 0.;
677  }
678  net->dydV[net->N_layer+1].vec[q] = ANN_InternalFunctionDer(net->v[net->N_layer+1].vec[q],net);
679 
680  for( k=0;k<net->N_layer;k++ ){
681  if( matrix_vector_prod( &net->W[net->N_layer-k], &net->dydV[net->N_layer-k+1] ,&net->temp[net->N_layer-k] ) ){
682  ANN_error( ANN_MATRIX_ERROR, "ANN_dEdW" );
683  return ANN_MATRIX_ERROR;
684  }
685  for( j=0;j<net->N_neuron[net->N_layer-k];j++ ){
686  net->dydV[net->N_layer-k].vec[j] = net->temp[net->N_layer-k].vec[j]*ANN_InternalFunctionDer(net->v[net->N_layer-k].vec[j],net);
687  }
688  for( i=0;i<net->N_neuron[net->N_layer-k-1];i++ ){
689  for( j=0;j<net->N_neuron[net->N_layer-k];j++ ){
690  if( ANN_dXdW( net ,i ,j ,(net->N_layer-k-1) ) ){
691  ANN_error( ANN_GEN_ERROR, "ANN_dEdW" );
692  return ANN_GEN_ERROR;
693  }
694  if( net->N_layer-k-1 != 0 ){
695  temp = ANN_InternalFunction(net->v[net->N_layer-k-1].vec[i],net);
696  }
697  else{
698  temp = (net->v[0].vec[i]);
699  }
700 
701  for( p=0;p<(net->N_neuron[net->N_layer-k-1]);p++ ){
702  temp += net->W[net->N_layer-k-1].mat[p][j]*net->dXdW[net->N_layer-k-1].vec[p];
703  }
704  //net->dydW[q][net->N_layer-1-k].mat[i][j] = net->dydV[net->N_layer+1-k].vec[j]*temp;
705  net->dydW[q][net->N_layer-1-k].mat[i][j] = net->dydV[net->N_layer-k].vec[j]*temp;
706  }
707  }
708  }
709  }
710  /* compute the derivative of the error with respect to the
711  * weights of the hidden (non-visible) layers */
712  for( i=0; i<(net->N_output); i++ ){
713  net->dEdV[net->N_layer+1].vec[i] = -e->vec[i]*ANN_InternalFunctionDer(net->v[net->N_layer+1].vec[i],net);
714  }
715 
716  for( k=0; k<net->N_layer; k++ ){
717  matrix_vector_prod( &net->W[net->N_layer-k], &net->dEdV[net->N_layer-k+1] ,&net->temp[net->N_layer-k] );
718  for( j=0;j<net->N_neuron[net->N_layer-k];j++ ){
719  net->dEdV[net->N_layer-k].vec[j] = net->temp[net->N_layer-k].vec[j]*ANN_InternalFunctionDer(net->v[net->N_layer-k].vec[j],net);
720  }
721  for( i=0;i<net->N_neuron[net->N_layer-k-1];i++ ){
722  for( j=0;j<net->N_neuron[net->N_layer-k];j++ ){
723  if( ANN_dXdW( net , i , j , (net->N_layer-k-1) ) ){
724  ANN_error( ANN_GEN_ERROR, "ANN_dEdW" );
725  return ANN_GEN_ERROR;
726  }
727 
728  if( net->N_layer-k-1 != 0 ){
729  temp = ANN_InternalFunction(net->v[net->N_layer-k-1].vec[i],net);
730  }
731  else{
732  temp = (net->v[0].vec[i]);
733  }
734 
735  for( p=0;p<(net->N_neuron[net->N_layer-k-1]);p++ ){
736  temp += net->W[net->N_layer-k-1].mat[p][j]*net->dXdW[net->N_layer-k-1].vec[p];
737  }
738  //net->dEdW[net->N_layer-1-k].mat[i][j] = net->rho*net->dEdW[net->N_layer-1-k].mat[i][j] - net->eta*net->dEdV[net->N_layer+1-k].vec[j]*temp;
739  net->dEdW[net->N_layer-1-k].mat[i][j] = net->rho*net->dEdW[net->N_layer-1-k].mat[i][j] - net->eta*net->dEdV[net->N_layer-k].vec[j]*temp;
740  }
741  }
742  }
743  /* update the data structure holding the derivative of all the outputs
744  * with respect to all the network weights, stored for the previous r steps */
745 
746  for( p=0;p<(net->r-1);p++ ){
747  for( i=0; i<net->N_neuron[net->N_layer+1];i++ ){
748  if( ANN_vector_matrix_ass( &net->dy[net->r-1-p][i], &net->dy[net->r-2-p][i], net->N_neuron, net->N_layer, 1. ) ){
749  ANN_error( ANN_GEN_ERROR, "ANN_dEdW" );
750  return ANN_GEN_ERROR;
751  }
752  }
753  }
754  if( net->r != 0 ){
755  for( i=0; i<net->N_neuron[net->N_layer+1];i++ ){
756  if( ANN_vector_matrix_ass( &net->dy[0][i], &net->dydW[i], net->N_neuron, net->N_layer, 1. ) ){
757  ANN_error( ANN_GEN_ERROR, "ANN_dEdW" );
758  return ANN_GEN_ERROR;
759  }
760  }
761  }
762 
763  return ANN_OK;
764 }
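
For reference, the updates at lines 669 and 739 store in net->dEdW a momentum-filtered weight increment rather than the raw error gradient. With e_j the error component passed in through e, each entry is, in effect (a reading of the code, not a formula from the original documentation),

    \Delta W_{kl} \leftarrow \rho \, \Delta W_{kl} + \eta \sum_{j} e_j \, \frac{\partial y_j}{\partial W_{kl}}

i.e. steepest descent on E = (1/2) \sum_j e_j^2 with learning rate eta and momentum rho; ANN_WeightUpdate() later adds this increment to the weights with K = 1.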

ann_res_t ANN_destroy (ANN *net)

Definition at line 218 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_OK, ANN::dEdV, ANN::dEdW, ANN::dW, ANN::dXdu, ANN::dXdW, ANN::dy, ANN::dydV, ANN::dydW, ANN::error, ANN::input, ANN::input_scale, ANN::jacobian, matrix_destroy(), ANN::N_layer, ANN::N_neuron, ANN::output, ANN::output_scale, ANN::r, ANN::temp, ANN::v, vector_destroy(), ANN::W, ANN::w_destroy, ANN::w_priv, ANN::Y_neuron, and ANN::yD.

Referenced by main(), AnnElasticConstitutiveLaw< T, Tder >::~AnnElasticConstitutiveLaw(), and AnnElasticConstitutiveLaw< doublereal, doublereal >::~AnnElasticConstitutiveLaw().

218  {
219 
220  int i,j,k;
221 
222  if( vector_destroy( &net->yD ) ){
223  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
224  return ANN_MATRIX_ERROR;
225  }
226  if( vector_destroy( &net->input ) ){
227  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
228  return ANN_MATRIX_ERROR;
229  }
230  if( vector_destroy( &net->output ) ){
231  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
232  return ANN_MATRIX_ERROR;
233  }
234  if( vector_destroy( &net->error ) ){
235  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
236  return ANN_MATRIX_ERROR;
237  }
238  if( matrix_destroy( &net->jacobian ) ){
239  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
240  return ANN_MATRIX_ERROR;
241  }
242  if( matrix_destroy( &net->input_scale ) ){
243  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
244  return ANN_MATRIX_ERROR;
245  }
246  if( matrix_destroy( &net->output_scale ) ){
247  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
248  return ANN_MATRIX_ERROR;
249  }
250 
251  for( i=0;i<net->N_layer+1;i++ ){
252  if( matrix_destroy( &net->W[i] ) ){
253  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
254  return ANN_MATRIX_ERROR;
255  }
256  if( matrix_destroy( &net->dEdW[i] ) ){
257  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
258  return ANN_MATRIX_ERROR;
259  }
260  if( matrix_destroy( &net->dW[i] ) ){
261  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
262  return ANN_MATRIX_ERROR;
263  }
264  }
265  free(net->dEdW);
266  free(net->W);
267  free(net->dW);
268 
269  for( i=0;i<net->N_layer+2;i++ ){
270  if( vector_destroy( &net->v[i] ) ){
271  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
272  return ANN_MATRIX_ERROR;
273  }
274  if( vector_destroy( &net->Y_neuron[i] ) ){
275  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
276  return ANN_MATRIX_ERROR;
277  }
278  if( vector_destroy( &net->dXdW[i] ) ){
279  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
280  return ANN_MATRIX_ERROR;
281  }
282  if( vector_destroy( &net->dXdu[i] ) ){
283  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
284  return ANN_MATRIX_ERROR;
285  }
286  if( vector_destroy( &net->temp[i] ) ){
287  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
288  return ANN_MATRIX_ERROR;
289  }
290  if( vector_destroy( &net->dydV[i] ) ){
291  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
292  return ANN_MATRIX_ERROR;
293  }
294  if( vector_destroy( &net->dEdV[i] ) ){
295  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
296  return ANN_MATRIX_ERROR;
297  }
298  }
299  free(net->v);
300  free(net->Y_neuron);
301  free(net->dXdW);
302  free(net->dXdu);
303  free(net->temp);
304  free(net->dydV);
305  free(net->dEdV);
306 
307  for( i=0; i<net->N_neuron[net->N_layer+1]; i++ ){
308  for( j=0; j<net->N_layer+1; j++ ){
309  if( matrix_destroy( &net->dydW[i][j] )) {
310  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
311  return ANN_MATRIX_ERROR;
312  }
313  }
314  free(net->dydW[i]);
315  }
316  free(net->dydW);
317 
318  for( k=0; k<net->r; k++ ){
319  for( i=0; i<net->N_neuron[net->N_layer+1]; i++ ){
320  for( j=0; j<net->N_layer+1; j++ ){
321  if( matrix_destroy( &net->dy[k][i][j] )){
322  ANN_error( ANN_MATRIX_ERROR, "ANN_destroy" );
323  return ANN_MATRIX_ERROR;
324  }
325  }
326  free(net->dy[k][i]);
327  }
328  free( net->dy[k] );
329  }
330  free(net->dy);
331  free(net->N_neuron);
332 
333  if (net->w_destroy(net->w_priv) != 0) {
334  /* error */
335  }
336 
337  return ANN_OK;
338 
339 }

ann_res_t ANN_dXdW (ANN *net, int I, int J, int N)

Definition at line 609 of file ann.c.

References ANN_error(), ANN_InternalFunctionDer(), ANN_MATRIX_ERROR, ANN_OK, ANN::dXdW, ANN::dy, matrix::mat, matrixT_vector_prod(), ANN::N_input, ANN::N_layer, ANN::N_neuron, ANN::r, ANN::temp, ANN::v, vector::vec, and ANN::W.

Referenced by ANN_dEdW().

609  {
610 
611  int i,j,k;
612 
613  /* initialize with the derivative of the inputs with respect to weight I,J
614  * of layer N */
615 
616  /* the external inputs do not depend on the weights */
617  i = 0;
618  for( j=0; j<net->N_input; j++ ){
619  net->dXdW[0].vec[i] = 0.;
620  i++;
621  }
622  for( k=0;k<net->r;k++ ){
623  for( j=0;j<net->N_neuron[net->N_layer+1];j++ ){
624  net->dXdW[0].vec[i] = net->dy[k][j][N].mat[I][J];
625  i++;
626  }
627  }
628  /* compute the derivative at the layer of interest starting from that of the inputs */
629  for( i=0;i<N;i++ ){
630  if( matrixT_vector_prod( &net->W[i], &net->dXdW[i] ,&net->temp[i+1] ) ){
631  ANN_error( ANN_MATRIX_ERROR, "ANN_dXdW" );
632  return ANN_MATRIX_ERROR;
633  }
634  for( j=0; j<net->N_neuron[i+1];j++ ){
635  net->dXdW[i+1].vec[j] = ANN_InternalFunctionDer(net->v[i+1].vec[j], net )*net->temp[i+1].vec[j];
636  }
637  }
638 
639  return ANN_OK;
640 }

void ANN_error (ann_res_t error, char *string)

Definition at line 900 of file ann.c.

References ANN_DATA_ERROR, ANN_GEN_ERROR, ANN_MATRIX_ERROR, ANN_NO_FILE, and ANN_NO_MEMORY.

Referenced by ANN_DataRead(), ANN_DataWrite(), ANN_dEdW(), ANN_destroy(), ANN_dXdW(), ANN_init(), ANN_InternalFunction(), ANN_InternalFunctionDer(), ANN_jacobian_matrix(), ANN_reset(), ANN_sim(), ANN_TotalError(), ANN_TrainingEpoch(), ANN_vector_matrix_init(), ANN_vector_vector_init(), ANN_WeightUpdate(), and ANN_write().

900  {
901 
902  switch(error) {
903  case ANN_NO_MEMORY: fprintf( stderr, "Memory error(@ %s)\n", string );
904  break;
905  case ANN_MATRIX_ERROR: fprintf( stderr, "Error in using matrix library(@ %s)\n", string );
906  break;
907  case ANN_NO_FILE: fprintf( stderr, "Error in file opening(@ %s)\n", string );
908  break;
909  case ANN_DATA_ERROR: fprintf( stderr, "Error in data value(@ %s)\n", string );
910  break;
911  case ANN_GEN_ERROR: fprintf( stderr, "Error(@ %s)\n", string );
912  break;
913  default: break;
914  }
915 }

ann_res_t ANN_init (ANN *net, const char *FileName)

Definition at line 48 of file ann.c.

References ANN_DATA_ERROR, ANN_error(), ANN_MATRIX_ERROR, ANN_NO_FILE, ANN_NO_MEMORY, ANN_OK, ANN_vector_matrix_init(), ANN_vector_vector_init(), ANN::dEdV, ANN::dEdW, ANN::dW, ANN::dXdu, ANN::dXdW, ANN::dy, ANN::dydV, ANN::dydW, ANN::error, ANN::eta, ANN::input, ANN::input_scale, ANN::jacobian, matrix_init(), matrix_read(), ANN::N_input, ANN::N_layer, ANN::N_neuron, ANN::N_output, ANN::output, ANN::output_scale, ANN::r, ANN::rho, ANN::temp, ANN::v, vector_init(), ANN::W, ANN::w_destroy, ANN::w_eval, W_F_NONE, ANN::w_init, w_linear_destroy(), w_linear_eval(), w_linear_init(), w_linear_read(), w_linear_write(), W_M_BIN, ANN::w_priv, ANN::w_read, w_tanh_destroy(), w_tanh_eval(), w_tanh_init(), w_tanh_read(), w_tanh_write(), ANN::w_write, ANN::Y_neuron, and ANN::yD.

Referenced by AnnElasticConstitutiveLaw< T, Tder >::AnnInit(), AnnElasticConstitutiveLaw< doublereal, doublereal >::AnnInit(), and main().

48  {
49 
50  int i,j;
51  int ActFnc;
52  FILE *fh;
53 
54  memset( net, 0, sizeof( ANN ) );
55 
56  if ( !(fh = fopen( FileName, "r" ) ) ){
57  fprintf( stderr, "Input file doesn't exist.\n" );
58  ANN_error( ANN_NO_FILE, "ANN_init" );
59  return ANN_NO_FILE;
60  }
61 
62  fscanf(fh,"%d",&(net->N_input));
63  if( net->N_input <= 0 ){
64  fprintf( stderr, "Input number must be greater than zero.\n");
65  ANN_error( ANN_DATA_ERROR, "ANN_init" );
66  return ANN_DATA_ERROR;
67  }
68  fscanf(fh,"%d",&(net->N_output));
69  if( net->N_output <= 0 ){
70  fprintf( stderr, "Output number must be greater than zero.\n");
71  ANN_error( ANN_DATA_ERROR, "ANN_init" );
72  return ANN_DATA_ERROR;
73  }
74  fscanf(fh,"%d",&(net->N_layer));
75  if( net->N_layer < 0 ){
76  fprintf( stderr, "Hidden layer number must be not negative.\n");
77  ANN_error( ANN_DATA_ERROR, "ANN_init" );
78  return ANN_DATA_ERROR;
79  }
80  fscanf(fh,"%d",&(net->r));
81  if( net->r < 0 ){
82  fprintf( stderr, "Timestep delay number must be not negative.\n");
83  ANN_error( ANN_DATA_ERROR, "ANN_init" );
84  return ANN_DATA_ERROR;
85  }
86 
87  if( !(net->N_neuron = (int *)malloc( (net->N_layer+2) * sizeof(int) ) ) ){
88  ANN_error( ANN_NO_MEMORY, "ANN_init" );
89  return ANN_NO_MEMORY;
90  }
91  net->N_neuron[0] = 0;
92  for( i=0;i<net->N_layer+1;i++ ){
93  fscanf(fh,"%d",&(net->N_neuron[i+1]));
94  if( net->N_neuron[i+1] <= 0 ){
95  fprintf( stderr, "Neuron number at %d layer must be greater than zero.\n",i+1);
96  ANN_error( ANN_DATA_ERROR, "ANN_init" );
97  return ANN_DATA_ERROR;
98  }
99  }
100  net->N_neuron[0] = net->N_input + ( net->r * net->N_neuron[net->N_layer+1] );
101 
102  fscanf( fh, "%d", &ActFnc);
103  switch( ActFnc ){
104  case 1: /* use tanh */
105  net->w_init = w_tanh_init;
106  net->w_destroy = w_tanh_destroy;
107  net->w_read = w_tanh_read;
108  net->w_write = w_tanh_write;
109  net->w_eval = w_tanh_eval;
110  break;
111  case 2: /* use linear activation function */
112  net->w_init = w_linear_init;
113  net->w_destroy = w_linear_destroy;
114  net->w_read = w_linear_read;
115  net->w_write = w_linear_write;
116  net->w_eval = w_linear_eval;
117  break;
118  default:
119  fprintf( stderr, "Unknown activation function\n" );
120  ANN_error( ANN_DATA_ERROR, "ANN_init" );
121  return ANN_DATA_ERROR;
122  }
123  /* end of switch */
124 
125  net->w_priv = NULL;
126  if (net->w_init(&net->w_priv) != 0) {
127  /* error */
128  }
129 
130  net->w_read(net->w_priv, fh, W_F_NONE);
131 
132  fscanf( fh, "%le", &(net->eta));
133  if( net->eta < 0 ){
134  fprintf( stderr, "Learning rate must be not negative.\n");
135  ANN_error( ANN_DATA_ERROR, "ANN_init" );
136  return ANN_DATA_ERROR;
137  }
138  fscanf( fh, "%le", &(net->rho));
139 
140  ANN_vector_matrix_init(&net->W, net->N_neuron, net->N_layer);
141  ANN_vector_matrix_init(&net->dEdW, net->N_neuron, net->N_layer);
142  ANN_vector_matrix_init(&net->dW, net->N_neuron, net->N_layer);
143 
144  for( i=0; i<net->N_layer+1; i++ ){
145  matrix_read( &net->W[i], fh, W_M_BIN );
146  }
147  if( matrix_init( &net->jacobian, net->N_input, net->N_output) ){
148  ANN_error( ANN_MATRIX_ERROR, "ANN_init" );
149  return ANN_MATRIX_ERROR;
150  }
151  if( matrix_init( &net->input_scale, net->N_input, 2) ){
152  ANN_error( ANN_MATRIX_ERROR, "ANN_init" );
153  return ANN_MATRIX_ERROR;
154  }
155  if( matrix_init( &net->output_scale, net->N_output, 2 ) ){
156  ANN_error( ANN_MATRIX_ERROR, "ANN_init" );
157  return ANN_MATRIX_ERROR;
158  }
159  matrix_read( &net->input_scale, fh, W_M_BIN );
160  matrix_read( &net->output_scale, fh, W_M_BIN );
161 
162  ANN_vector_vector_init(&net->v, net->N_neuron, net->N_layer);
163  ANN_vector_vector_init(&net->Y_neuron, net->N_neuron, net->N_layer);
164  ANN_vector_vector_init(&net->dXdW, net->N_neuron, net->N_layer);
165  ANN_vector_vector_init(&net->temp, net->N_neuron, net->N_layer);
166  ANN_vector_vector_init(&net->dydV, net->N_neuron, net->N_layer);
167  ANN_vector_vector_init(&net->dEdV, net->N_neuron, net->N_layer);
168  ANN_vector_vector_init(&net->dXdu, net->N_neuron, net->N_layer);
169 
170  if( net->r > 0 ){
171  if( vector_init( &net->yD, net->r*net->N_neuron[net->N_layer+1] ) ){
172  ANN_error( ANN_MATRIX_ERROR, "ANN_init" );
173  return ANN_MATRIX_ERROR;
174  }
175  }
176  if( vector_init( &net->input, net->N_input ) ){
177  ANN_error( ANN_MATRIX_ERROR, "ANN_init" );
178  return ANN_MATRIX_ERROR;
179  }
180  if( vector_init( &net->output, net->N_output ) ){
181  ANN_error( ANN_MATRIX_ERROR, "ANN_init" );
182  return ANN_MATRIX_ERROR;
183  }
184  if( vector_init( &net->error, net->N_output ) ){
185  ANN_error( ANN_MATRIX_ERROR, "ANN_init" );
186  return ANN_MATRIX_ERROR;
187  }
188 
189 
190  if( !(net->dydW = (ANN_vector_matrix *)calloc( net->N_neuron[net->N_layer+1], sizeof(ANN_vector_matrix) ) ) ){
191  ANN_error( ANN_NO_MEMORY, "ANN_init" );
192  return ANN_NO_MEMORY;
193  }
194  for( i=0; i<net->N_neuron[net->N_layer+1]; i++ ){
195  ANN_vector_matrix_init(&net->dydW[i], net->N_neuron, net->N_layer);
196  }
197 
198  if( !(net->dy = (ANN_vector_matrix **)calloc( net->r, sizeof(ANN_vector_matrix *) ) ) ){
199  ANN_error( ANN_NO_MEMORY, "ANN_init" );
200  return ANN_NO_MEMORY;
201  }
202  for( i=0; i<net->r; i++){
203  if( !(net->dy[i] = (ANN_vector_matrix *)calloc( net->N_neuron[net->N_layer+1], sizeof(ANN_vector_matrix) ) ) ){
204  ANN_error( ANN_NO_MEMORY, "ANN_init" );
205  return ANN_NO_MEMORY;
206  }
207  for( j=0; j<net->N_neuron[net->N_layer+1]; j++ ){
208  ANN_vector_matrix_init( &net->dy[i][j], net->N_neuron, net->N_layer );
209  }
210  }
211 
212  fclose(fh);
213 
214  return ANN_OK;
215 }
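
Judging from the reads above, the network file contains, in order: N_input, N_output, N_layer, r, the N_layer+1 neuron counts, an activation-function selector (1 = tanh, 2 = linear) with its parameters, eta, rho, the N_layer+1 weight matrices, and the input/output scale matrices. A minimal lifecycle sketch ("ann.dat" is a hypothetical file name):

    ANN net;

    if (ANN_init(&net, "ann.dat") != ANN_OK) {
            /* the reason has already been reported through ANN_error() */
            return 1;
    }

    /* ... simulate with ANN_sim() or train with ANN_TrainingEpoch() ... */

    ANN_destroy(&net);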

double ANN_InternalFunction (double v, ANN *net)

Definition at line 526 of file ann.c.

References ANN_error(), ANN_GEN_ERROR, ANN::w_eval, and ANN::w_priv.

Referenced by ANN_dEdW(), and ANN_sim().

526  {
527 
528  double y;
529 
530  if (net->w_eval(net->w_priv, v, 0, &y) != 0) {
531  ANN_error( ANN_GEN_ERROR, "ANN_InternalFunction" );
532  return ANN_GEN_ERROR;
533  }
534 
535  return y;
536 }

double ANN_InternalFunctionDer (double v, ANN *net)

Definition at line 540 of file ann.c.

References ANN_error(), ANN_GEN_ERROR, ANN::w_eval, and ANN::w_priv.

Referenced by ANN_dEdW(), ANN_dXdW(), and ANN_jacobian_matrix().

540  {
541 
542  double y;
543 
544  if (net->w_eval(net->w_priv, v, 1, &y) != 0) {
545  ANN_error( ANN_GEN_ERROR, "ANN_InternalFunctionDer" );
546  return ANN_GEN_ERROR;
547  }
548  return y;
549 }

ann_res_t ANN_jacobian_matrix (ANN *net, matrix *jacobian)

Definition at line 921 of file ann.c.

References ANN_error(), ANN_InternalFunctionDer(), ANN_MATRIX_ERROR, ANN_OK, ANN::dXdu, ANN::input_scale, matrix::mat, matrixT_vector_prod(), ANN::N_input, ANN::N_layer, ANN::N_neuron, ANN::N_output, ANN::output_scale, ANN::temp, ANN::v, vector::vec, vector_null(), and ANN::W.

Referenced by AnnElasticConstitutiveLaw< T, Tder >::Update(), AnnElasticConstitutiveLaw< doublereal, doublereal >::Update(), AnnViscoElasticConstitutiveLaw< T, Tder >::Update(), and AnnViscoElasticConstitutiveLaw< doublereal, doublereal >::Update().

921  {
922 
923  unsigned i, j, k;
924 
925  for( i=0; i<net->N_input; i++ ){
926  vector_null( &net->dXdu[0] );
927  net->dXdu[0].vec[i] = 1.;
928  for( j=0; j<net->N_layer+1; j++ ){
929  if( matrixT_vector_prod( &net->W[j], &net->dXdu[j] ,&net->temp[j+1] ) ){
930  ANN_error( ANN_MATRIX_ERROR, "ANN_dXdW" );
931  return ANN_MATRIX_ERROR;
932  }
933  for( k=0; k<net->N_neuron[j+1];k++ ){
934  net->dXdu[j+1].vec[k] = ANN_InternalFunctionDer(net->v[j+1].vec[k], net )*net->temp[j+1].vec[k];
935  }
936  }
937  for( k=0; k<net->N_output; k++ ){
938  //jacobian->mat[i][k] = ( net->output_scale.mat[k][0] * net->input_scale.mat[i][0] )*net->dXdu[net->N_layer+1].vec[k];
939  jacobian->mat[i][k] = ( net->input_scale.mat[i][0] / net->output_scale.mat[k][0] )*net->dXdu[net->N_layer+1].vec[k];
940  }
941  }
942  return ANN_OK;
943 }
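
The derivatives are propagated from net->v, which is filled by ANN_sim(), so the network should be simulated at the operating point first. A sketch, assuming net, in and out have already been set up as in the ANN_sim() example below:

    matrix jac;

    matrix_init(&jac, net.N_input, net.N_output);

    ANN_sim(&net, &in, &out, 0);            /* evaluate only, no feedback update */
    ANN_jacobian_matrix(&net, &jac);
    /* jac.mat[i][k] now holds the derivative of output k with respect to input i,
     * rescaled through input_scale and output_scale as done at line 939 */

    matrix_destroy(&jac);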

ann_res_t ANN_reset (ANN *net)

Definition at line 828 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_OK, ANN::dW, ANN::dy, matrix_null(), ANN::N_layer, ANN::N_neuron, ANN::r, vector_null(), and ANN::yD.

Referenced by main().

828  {
829 
830  int i,j,k;
831 
832  if( net->r != 0 ) {
833  if( vector_null( &net->yD ) ){
834  ANN_error( ANN_MATRIX_ERROR, "ANN_reset" );
835  return ANN_MATRIX_ERROR;
836  }
837  }
838  for( i=0; i<net->N_layer+1; i++ ){
839  if( matrix_null( &net->dW[i] ) ){
840  ANN_error( ANN_MATRIX_ERROR, "ANN_reset" );
841  return ANN_MATRIX_ERROR;
842  }
843  }
844 
845  for( i=0; i<net->r; i++ ){
846  for( j=0; j<net->N_neuron[net->N_layer+1]; j++ ){
847  for( k=0; k<net->N_layer+1; k++ ){
848  if( matrix_null( &net->dy[i][j][k] ) ){
849  ANN_error( ANN_MATRIX_ERROR, "ANN_reset" );
850  return ANN_MATRIX_ERROR;
851  }
852  }
853  }
854  }
855 
856  return ANN_OK;
857 }

ann_res_t ANN_sim (ANN *net, vector *input, vector *output, unsigned flags)

Definition at line 425 of file ann.c.

References ANN_error(), ANN_FEEDBACK_UPDATE, ANN_InternalFunction(), ANN_MATRIX_ERROR, ANN_OK, ANN::input_scale, matrix::mat, matrixT_vector_prod(), ANN::N_input, ANN::N_layer, ANN::N_neuron, ANN::N_output, ANN::output_scale, ANN::r, ANN::v, vector::vec, ANN::W, ANN::Y_neuron, and ANN::yD.

Referenced by ANN_TrainingEpoch(), main(), AnnElasticConstitutiveLaw< T, Tder >::Update(), AnnElasticConstitutiveLaw< doublereal, doublereal >::Update(), AnnViscoElasticConstitutiveLaw< T, Tder >::Update(), and AnnViscoElasticConstitutiveLaw< doublereal, doublereal >::Update().

425  {
426 
427  int i,j;
428 
429  /* build the input vector by stacking the feedback
430  * of the previous outputs below the external
431  * inputs */
432 
433  for( i=0; i<net->N_input; i++ ){
434  net->Y_neuron[0].vec[i] = net->input_scale.mat[i][1] + input->vec[i]*net->input_scale.mat[i][0];
435  }
436  for( i=0; i<net->N_neuron[net->N_layer+1]*net->r; i++ ){
437  net->Y_neuron[0].vec[i+net->N_input] = net->yD.vec[i];
438  }
439 
440  for( i=0;i<net->N_neuron[0];i++ ){
441  net->v[0].vec[i] = net->Y_neuron[0].vec[i];
442  }
443 
444  /* compute the output vector */
445  for( i=0;i<net->N_layer+1;i++ ){
446  if( matrixT_vector_prod( &net->W[i], &net->Y_neuron[i] ,&net->v[i+1] ) ){
447  ANN_error( ANN_MATRIX_ERROR, "ANN_sim" );
448  return ANN_MATRIX_ERROR;
449  }
450  for( j=0; j<net->N_neuron[i+1];j++ ){
451  net->Y_neuron[i+1].vec[j] = ANN_InternalFunction(net->v[i+1].vec[j], net );
452  }
453  }
454 
455  for( i=0;i<net->N_output;i++ ){
456  //output->vec[i] = net->output_scale.mat[i][1] + net->Y_neuron[net->N_layer+1].vec[i]*net->output_scale.mat[i][0];
457  output->vec[i] = (net->Y_neuron[net->N_layer+1].vec[i]-net->output_scale.mat[i][1])/net->output_scale.mat[i][0];
458  }
459 
460  /* update the vector of fed-back outputs */
461  if( flags & ANN_FEEDBACK_UPDATE ){
462  for( i=0;i<(net->r-1)*(net->N_neuron[net->N_layer+1]);i++ ){
463  net->yD.vec[((net->r)*(net->N_neuron[net->N_layer+1]))-1-i] = net->yD.vec[((net->r-1)*(net->N_neuron[net->N_layer+1]))-1-i];
464  }
465  if( net->r != 0 ){
466  for( i=0;i<net->N_neuron[net->N_layer+1];i++ ){
467  net->yD.vec[i] = net->Y_neuron[net->N_layer+1].vec[i];
468  }
469  }
470  }
471 
472  return ANN_OK;
473 }
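
As read from the code, inputs are scaled on the way in (offset plus gain from input_scale), outputs are unscaled on the way out through output_scale, and ANN_FEEDBACK_UPDATE shifts the newly computed outputs into the feedback buffer net->yD. A minimal sketch ("ann.dat" is a hypothetical file name; error checks trimmed):

    ANN net;
    vector in, out;

    ANN_init(&net, "ann.dat");
    vector_init(&in, net.N_input);
    vector_init(&out, net.N_output);

    /* fill in.vec[0 .. net.N_input-1], then advance the network one step */
    ANN_sim(&net, &in, &out, ANN_FEEDBACK_UPDATE);

    vector_destroy(&in);
    vector_destroy(&out);
    ANN_destroy(&net);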

ann_res_t ANN_TotalError (matrix *DES_OUTPUT, matrix *NN_OUTPUT, double *err)

Definition at line 861 of file ann.c.

References ANN_error(), ANN_GEN_ERROR, ANN_OK, matrix::mat, matrix::Ncolumn, and matrix::Nrow.

Referenced by main().

861  {
862 
863  int i,j;
864 
865  if( DES_OUTPUT->Nrow != NN_OUTPUT->Nrow || DES_OUTPUT->Ncolumn != NN_OUTPUT->Ncolumn ){
866  fprintf( stderr, "Incompatible dimensions\n" );
867  ANN_error( ANN_GEN_ERROR, "ANN_TotalError" );
868  return ANN_GEN_ERROR;
869  }
870 
871  *err = 0.;
872  for( i=0; i<DES_OUTPUT->Nrow; i++ ){
873  for( j=0; j<DES_OUTPUT->Ncolumn; j++ ){
874  *err += 0.5*( DES_OUTPUT->mat[i][j]-NN_OUTPUT->mat[i][j] )*( DES_OUTPUT->mat[i][j]-NN_OUTPUT->mat[i][j] );
875  }
876  }
877 
878  return ANN_OK;
879 }
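
The accumulated value is the plain sum-of-squares cost over the whole data set,

    err = \frac{1}{2} \sum_{t=1}^{N_{\mathrm{sample}}} \sum_{j} \left( \mathrm{DES\_OUTPUT}_{tj} - \mathrm{NN\_OUTPUT}_{tj} \right)^2

which is the quantity typically monitored across calls to ANN_TrainingEpoch().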

ann_res_t ANN_TrainingEpoch (ANN *net, matrix *INPUT, matrix *DES_OUTPUT, matrix *NN_OUTPUT, int N_sample, ann_training_mode_t mode)

Definition at line 769 of file ann.c.

References ANN_dEdW(), ANN_error(), ANN_FEEDBACK_UPDATE, ANN_GEN_ERROR, ANN_MATRIX_ERROR, ANN_OK, ANN_sim(), ANN_TM_BATCH, ANN_TM_SEQUENTIAL, ANN_WeightUpdate(), ANN::dEdW, ANN::dW, ANN::error, ANN::input, matrix::mat, matrix_sum(), ANN::N_input, ANN::N_layer, ANN::N_output, ANN::output, and vector::vec.

Referenced by main().

769  {
770 
771  int i,t;
772 
773  for( t=0; t<N_sample; t++ ){
774 
775  /* build the input vector at time t
776  * from the input matrix INPUT */
777  for( i=0; i<net->N_input; i++ ){
778  net->input.vec[i] = INPUT->mat[t][i];
779  }
780  /* simulate the network to compute the outputs at step t
781  * and the corresponding error */
782  if( ANN_sim( net, &net->input, &net->output, ANN_FEEDBACK_UPDATE) ){
783  ANN_error( ANN_GEN_ERROR, "ANN_TrainingEpoch" );
784  return ANN_GEN_ERROR;
785  }
786  for( i=0; i<net->N_output; i++ ){
787  net->error.vec[i] = DES_OUTPUT->mat[t][i]-net->output.vec[i];
788  NN_OUTPUT->mat[t][i] = net->output.vec[i];
789  }
790  /* compute the derivative of the error with respect to all
791  * the synaptic weights */
792  if( ANN_dEdW( net, &net->error) ){
793  ANN_error( ANN_GEN_ERROR, "ANN_TrainingEpoch" );
794  return ANN_GEN_ERROR;
795  }
796  /* BATCH training mode: accumulate the derivative
797  * and apply it only at the end of a training
798  * epoch */
799  if( mode == ANN_TM_BATCH ){
800  for( i=0;i<net->N_layer+1;i++ ){
801  if( matrix_sum( &net->dW[i], &net->dEdW[i], &net->dW[i], 1. ) ){
802  ANN_error( ANN_MATRIX_ERROR, "ANN_TrainingEpoch" );
803  return ANN_MATRIX_ERROR;
804  }
805  }
806  }
807  /* SEQUENTIAL training mode: apply the weight
808  * change immediately */
809  if( mode == ANN_TM_SEQUENTIAL ){
810  if( ANN_WeightUpdate( net, net->dEdW, 1. ) ){
811  ANN_error( ANN_GEN_ERROR, "ANN_TrainingEpoch" );
812  return ANN_GEN_ERROR;
813  }
814  }
815  }
816  if( mode == ANN_TM_BATCH ){
817  if( ANN_WeightUpdate( net, net->dW, 1. ) ){
818  ANN_error( ANN_GEN_ERROR, "ANN_TrainingEpoch" );
819  return ANN_GEN_ERROR;
820  }
821  }
822 
823  return ANN_OK;
824 }
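
One plausible outer training loop, in the spirit of the main() listed under "Referenced by" (all file names and the epoch count are hypothetical; error checks trimmed):

    ANN net;
    matrix INPUT, DES_OUTPUT, NN_OUTPUT;
    int N_sample, epoch;
    double err;

    ANN_init(&net, "ann.dat");
    ANN_DataRead(&INPUT, &N_sample, "train_input.dat");
    ANN_DataRead(&DES_OUTPUT, &N_sample, "train_output.dat");
    matrix_init(&NN_OUTPUT, N_sample, net.N_output);

    for (epoch = 0; epoch < 1000; epoch++) {
            ANN_reset(&net);        /* clear the feedback buffer and the batch accumulator */
            if (ANN_TrainingEpoch(&net, &INPUT, &DES_OUTPUT, &NN_OUTPUT,
                    N_sample, ANN_TM_BATCH) != ANN_OK) {
                    break;
            }
            ANN_TotalError(&DES_OUTPUT, &NN_OUTPUT, &err);
    }

With ANN_TM_SEQUENTIAL the weights are updated after every sample instead of once per epoch.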

ann_res_t ANN_vector_matrix_ass (ANN_vector_matrix *vm1, ANN_vector_matrix *vm2, int *N_neuron, int N_layer, double K)

Definition at line 883 of file ann.c.

References ANN_OK, and mat.

Referenced by ANN_dEdW(), and main().

883  {
884 
885  unsigned i,j,k;
886 
887  for( i=0; i<N_layer+1; i++ ){
888  for( j=0; j<N_neuron[i]; j++ ){
889  for( k=0; k<N_neuron[i+1]; k++ ){
890  //vm1[i]->mat[j][k] = K*vm2[i]->mat[j][k];
891  ((*vm1)[i]).mat[j][k] = K*((*vm2)[i]).mat[j][k];
892  //(&((*vm1)[i]))->mat[j][k] = K*(&((*vm2)[i]))->mat[j][k];
893  }
894  }
895  }
896  return ANN_OK;
897 }

ann_res_t ANN_vector_matrix_init (ANN_vector_matrix *vm, int *N_neuron, int N_layer)

Definition at line 569 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_NO_MEMORY, ANN_OK, and matrix_init().

Referenced by ANN_init(), and main().

569  {
570 
571  int i;
572 
573  if( !( *vm = (matrix *)calloc( N_layer+1, sizeof(matrix) ) ) ){
574  ANN_error( ANN_NO_MEMORY, "ANN_vector_matrix_init" );
575  return ANN_NO_MEMORY;
576  }
577  for( i=0; i<N_layer+1; i++ ){
578  if( matrix_init( &(*vm)[i], N_neuron[i], N_neuron[i+1] ) ){
579  ANN_error( ANN_MATRIX_ERROR, "ANN_vector_matrix_init" );
580  return ANN_MATRIX_ERROR;
581  }
582  }
583 
584  return ANN_OK;
585 }

ann_res_t ANN_vector_vector_init (ANN_vector_vector *vv, int *N_neuron, int N_layer)

Definition at line 588 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_NO_MEMORY, ANN_OK, and vector_init().

Referenced by ANN_init().

588  {
589 
590  int i;
591 
592  if( !( *vv = (ANN_vector_vector)calloc( N_layer+2, sizeof(vector) ) ) ){
593  ANN_error( ANN_NO_MEMORY, "ANN_vector_vector_init" );
594  return ANN_NO_MEMORY;
595  }
596  for( i=0; i<N_layer+2; i++ ){
597  if( vector_init( &(*vv)[i], N_neuron[i] ) ){
598  ANN_error( ANN_MATRIX_ERROR, "ANN_vector_vector_init" );
599  return ANN_MATRIX_ERROR;
600  }
601  }
602 
603  return ANN_OK;
604 }

ann_res_t ANN_WeightUpdate (ANN *net, ANN_vector_matrix DW, double K)

Definition at line 552 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_OK, matrix_sum(), ANN::N_layer, and ANN::W.

Referenced by ANN_TrainingEpoch(), and main().

552  {
553 
554  int i;
555 
556  for( i=0;i<net->N_layer+1;i++ ){
557  if( matrix_sum( &net->W[i], &DW[i], &net->W[i], K ) ){
558  ANN_error( ANN_MATRIX_ERROR, "ANN_WeightUpdate" );
559  return ANN_MATRIX_ERROR;
560  }
561  }
562 
563  return ANN_OK;
564 
565 }
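
Assuming matrix_sum(MAT1, MAT2, MAT_R, K) computes MAT_R = MAT1 + K*MAT2 (an assumption, consistent with how ANN_TrainingEpoch() accumulates dEdW into dW), the loop applies, for every layer i,

    W^{(i)} \leftarrow W^{(i)} + K \, \mathrm{DW}^{(i)}

ANN_TrainingEpoch() calls it with K = 1 and DW equal to net->dEdW (sequential mode) or to the accumulated net->dW (batch mode).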

ann_res_t ANN_write (ANN *net, FILE *fh, unsigned flags)

Definition at line 341 of file ann.c.

References ANN_error(), ANN_MATRIX_ERROR, ANN_OK, ANN_W_A_BIN, ANN_W_A_TEXT, ANN::eta, ANN::input_scale, matrix_write(), ANN::N_input, ANN::N_layer, ANN::N_neuron, ANN::N_output, ANN::output_scale, ANN::r, ANN::rho, ANN::W, W_F_BIN, W_F_TEXT, W_M_BIN, W_M_TEXT, ANN::w_priv, and ANN::w_write.

Referenced by main().

341  {
342 
343  int i;
344 
345  if( flags & ANN_W_A_TEXT ){
346  fprintf( fh, "ARTIFICIAL NEURAL NETWORK\n");
347  fprintf( fh, "Network topology\n");
348  fprintf( fh, "-Input number: %d \n", net->N_input);
349  fprintf( fh, "-Output number: %d \n", net->N_output);
350  fprintf( fh, "-Hidden layers number: %d \n", net->N_layer);
351 
352  for( i=0;i<net->N_layer+1;i++ ){
353  fprintf( fh, "-Neurons number (layer number %d) : %d\n", i+1, net->N_neuron[i+1]);
354  }
355  fprintf( fh, "-Time delay number: %d \n", net->r);
356 
357  fprintf( fh, "Training parameters\n");
358  fprintf( fh, "-Learning rate: %e \n", net->eta);
359  fprintf( fh, "-Momentum term: %e \n", net->rho);
360 
361  fprintf( fh, "Activation function parameters\n");
362  net->w_write(net->w_priv, fh, W_F_TEXT);
363 
364  fprintf( fh, "Synaptic weight\n");
365 
366  for( i=0; i<net->N_layer+1; i++ ){
367  if( i!=net->N_layer )
368  fprintf( fh, "-Layer number %d :\n",i+1);
369  else
370  fprintf( fh, "-Visible layer :\n");
371  if( matrix_write( &net->W[i], fh, W_M_TEXT ) ){
372  ANN_error( ANN_MATRIX_ERROR, "ANN_write" );
373  return ANN_MATRIX_ERROR;
374  }
375  }
376  fprintf( fh, "Input scale factors\n" );
377  if( matrix_write( &net->input_scale, fh, W_M_TEXT ) ){
378  ANN_error( ANN_MATRIX_ERROR, "ANN_write" );
379  return ANN_MATRIX_ERROR;
380  }
381  fprintf( fh, "Output scale factors\n" );
382  if( matrix_write( &net->output_scale, fh, W_M_TEXT ) ){
383  ANN_error( ANN_MATRIX_ERROR, "ANN_write" );
384  return ANN_MATRIX_ERROR;
385  }
386  }
387  if( flags & ANN_W_A_BIN ){
388  fprintf( fh, " %d \n", net->N_input);
389  fprintf( fh, " %d \n", net->N_output);
390  fprintf( fh, " %d \n", net->N_layer);
391  fprintf( fh, " %d \n", net->r);
392 
393  for( i=0;i<net->N_layer+1;i++ ){
394  fprintf( fh, " %d ", net->N_neuron[i+1]);
395  }
396  fprintf( fh, "\n\n" );
397 
398  net->w_write(net->w_priv, fh, W_F_BIN);
399  fprintf( fh, "\n" );
400 
401  fprintf( fh, " %e \n", net->eta);
402  fprintf( fh, " %e \n", net->rho);
403  fprintf( fh, "\n" );
404 
405  for( i=0; i<net->N_layer+1; i++ ){
406  if( matrix_write( &net->W[i], fh, W_M_BIN ) ){
407  ANN_error( ANN_MATRIX_ERROR, "ANN_write" );
408  return ANN_MATRIX_ERROR;
409  }
410  fprintf( fh, "\n\n" );
411  }
412  if( matrix_write( &net->input_scale, fh, W_M_BIN ) ){
413  ANN_error( ANN_MATRIX_ERROR, "ANN_write" );
414  return ANN_MATRIX_ERROR;
415  }
416  if( matrix_write( &net->output_scale, fh, W_M_BIN ) ){
417  ANN_error( ANN_MATRIX_ERROR, "ANN_write" );
418  return ANN_MATRIX_ERROR;
419  }
420  }
421  return ANN_OK;
422 }
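
A minimal sketch for saving a trained network; "ann_trained.dat" is a hypothetical file name, ANN_W_A_TEXT produces the annotated report shown above and ANN_W_A_BIN the compact numeric form:

    FILE *fh = fopen("ann_trained.dat", "w");

    if (fh != NULL) {
            ANN_write(&net, fh, ANN_W_A_BIN);       /* or ANN_W_A_TEXT */
            fclose(fh);
    }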