# autoencoder.jl — dense autoencoder (288 -> 2 -> 288) trained with Flux on the GPU.
  1. using Flux
  2. using CuArrays
  3. using Printf
  4. include("./dataManager.jl")
  5. include("./verbose.jl")
  6. using .verbose
  7. using .dataManager: make_batch
  8. using FeedbackNets
  9. dataset_folderpath = "../MATLAB/TrainingData/"
  10. dataset_name = "2019_09_09_1658"
  11. hidden1 = 150
  12. hidden2 = 80
  13. epochs = 50
  14. flatten(x) = reshape(x, :, size(x, ndims(x)))
  15. train = make_batch(dataset_folderpath, "$(dataset_name)_TRAIN.mat", normalize_data=false, truncate_data=false)
  16. val = make_batch(dataset_folderpath, "$(dataset_name)_VAL.mat", normalize_data=false, truncate_data=false)
  17. test = make_batch(dataset_folderpath, "$(dataset_name)_TEST.mat", normalize_data=false, truncate_data=false)
  18. train = gpu.(train)
  19. val = gpu.(val)
  20. test = gpu.(test)
  21. model = Chain(
  22. # encoding
  23. flatten,
  24. Dense(288, hidden1, relu),
  25. Dense(hidden1, hidden2, relu),
  26. Dense(hidden2, 2),
  27. # decoding
  28. Dense(2, hidden2, relu),
  29. Dense(hidden2, hidden1),
  30. Dense(hidden1, 288, relu),
  31. )
  32. model = model |> gpu
  33. loss(x, y) = Flux.mse(model(x), flatten(x))
  34. loss(x) = Flux.mse(model(x), flatten(x))
  35. opt = ADAM()
  36. for i in 1:epochs
  37. Flux.train!(loss, params(model), train, opt)
  38. @tprintf("Epoch %i: Loss: %f\n", i, loss(train[1][1]))
  39. end