# autoencoder.jl
  1. using Flux
  2. using CuArrays
  3. using Printf
  4. include("./dataManager.jl")
  5. include("./verbose.jl")
  6. using .verbose
  7. using .dataManager: make_batch
  8. using FeedbackNets
  9. dataset_folderpath = "../MATLAB/TrainingData/"
  10. dataset_name = "2019_09_09_1658"
  11. hidden1 = 150
  12. hidden2 = 80
  13. epochs = 100
  14. train = make_batch(dataset_folderpath, "$(dataset_name)_TRAIN.mat", normalize_data=false, truncate_data=false)
  15. val = make_batch(dataset_folderpath, "$(dataset_name)_VAL.mat", normalize_data=false, truncate_data=false)
  16. test = make_batch(dataset_folderpath, "$(dataset_name)_TEST.mat", normalize_data=false, truncate_data=false)
  17. train = gpu.(train)
  18. val = gpu.(val)
  19. test = gpu.(test)
  20. model = Chain(
  21. # encoding
  22. flatten,
  23. Dense(288, hidden1, relu),
  24. Dense(hidden1, hidden2, relu),
  25. Dense(hidden2, 2),
  26. # decoding
  27. Dense(2, hidden2, relu),
  28. Dense(hidden2, hidden1),
  29. Dense(hidden1, 288, relu),
  30. )
  31. model = model |> gpu
  32. loss(x, y) = Flux.mse(model(x), flatten(x))
  33. function loss(dataset)
  34. loss_val = 0.0f0
  35. for (data, labels) in dataset
  36. loss_val += loss(data, labels)
  37. end
  38. return loss_val / length(dataset)
  39. end
  40. const moment = 0.9f0
  41. const decay_rate = 0.1f0
  42. const decay_step = 40
  43. const init_learning_rate = 0.3f0
  44. function adapt_learnrate(epoch_idx)
  45. return init_learning_rate * decay_rate^(epoch_idx / decay_step)
  46. end
  47. opt = Momentum(init_learning_rate, moment)
  48. @tprintf("INIT: Loss: %f\n", loss(train))
  49. for i in 1:epochs
  50. Flux.train!(loss, params(model), train, opt)
  51. @tprintf("Epoch %i: Loss: %f\n", i, loss(train))
  52. opt.eta = adapt_learnrate(i)
  53. end