# net.jl (1.5 KB) — recovered from a scraped source listing; the per-line
# numbering artifacts of the original listing have been removed.
"""
Author: Sebastian Vendt, University of Ulm
"""
using Flux, Statistics
using Flux: onecold
using BSON
using Dates
using Printf
using NNlib
include("./dataManager.jl")
using .dataManager: make_batch
using Logging
import LinearAlgebra: norm
# Extra `norm` method for Flux's TrackedArray (Tracker-era Flux, pre-Zygote).
# The `eps(T)` term keeps the derivative of sqrt finite when the norm is
# exactly zero (d/dx sqrt(x) -> Inf at x = 0), avoiding NaN gradients.
# NOTE(review): this is type piracy — neither `norm` nor `TrackedArray` is
# owned here; also assumes `TrackedArray` is brought into scope by
# `using Flux` — confirm against the Flux version pinned for this project.
norm(x::TrackedArray{T}) where T = sqrt(sum(abs2.(x)) + eps(T))
######################
# PARAMETERS
######################
const batch_size = 100          # samples per minibatch
const momentum = 0.9f0          # SGD momentum coefficient
const lambda = 0.0005f0         # L2 weight-decay strength
# Deliberately non-const: the training loop presumably reassigns these
# (see adapt_learnrate below) — TODO confirm against the training code.
init_learning_rate = 0.1f0
learning_rate = init_learning_rate
const epochs = 100
const decay_rate = 0.1f0        # multiplicative LR decay factor
const decay_step = 40           # epochs per decay period
const usegpu = true
const printout_interval = 5     # epochs between progress printouts
const save_interval = 25        # epochs between model checkpoints
const time_format = "HH:MM:SS"
data_size = (50, 1) # MNIST is using 28, 28
# ARCHITECTURE
# NOTE(review): the next three lines are bare names, not assignments —
# evaluating them throws UndefVarError. They look like placeholders for the
# Dense-layer widths used in the model Chain below (the first must equal the
# flattened conv-stack output size). TODO: assign concrete values.
inputDense1
inputDense2
inputDense3
classes = 2
# enter the datasets and models you want to train
dataset_folderpath = "../MATLAB/TrainingData/"
const model_save_location = "../trainedModels/"
const log_save_location = "../logs/"
  40. function adapt_learnrate(epoch_idx)
  41. return init_learning_rate * decay_rate^(epoch_idx / decay_step)
  42. end
# Load the CUDA array backend only when GPU execution is requested.
# (Tracker-era Flux pairs with CuArrays; a conditional `using` is legal here
# because this runs at top level, not inside a function.)
if usegpu
    using CuArrays
end
# Three conv/pool stages followed by a two-hidden-layer dense classifier.
# NOTE(review): this Chain is incomplete as written — `kernel`, `channels`,
# `features`, and `pooldims1` are never defined anywhere in this file,
# `stride=()` is not a valid stride tuple, and the second and third Conv and
# MaxPool calls are missing their filter-size/channel-pair and window
# arguments. Evaluating this throws UndefVarError/MethodError; the missing
# pieces must be filled in before training.
model = Chain(
    # pad = kernel ÷ 2 gives "same" padding for odd kernel sizes
    Conv(kernel, channels=>features, relu, pad=map(x -> x ÷ 2, kernel)),
    MaxPool(pooldims1, stride=()),              # TODO: stride=() is invalid
    Conv(relu, pad=map(x -> x ÷ 2, kernel)),    # TODO: kernel + in=>out channels missing
    MaxPool(),                                  # TODO: pooling window missing
    Conv(relu, pad=map(x -> x ÷ 2, kernel)),    # TODO: kernel + in=>out channels missing
    MaxPool(),                                  # TODO: pooling window missing
    flatten,
    Dense(inputDense1, inputDense2, σ),
    Dense(inputDense2, inputDense3, σ),
    Dense(inputDense3, classes) # identity to output coordinates!
)