---
# Training configuration.
# Values of the form `$optimizers.adam` appear to be resolved references into an
# `optimizers` registry, with optional call-style arguments — NOTE(review):
# confirm the exact resolver syntax against the consuming framework.
training:
  # Bare reference: use the registry's default Adam settings.
  optimizer: $optimizers.adam

  # Inline-argument form: override the learning rate at the reference site.
  optimizer2: $optimizers.adam(lr=0.2)

  # Key-annotation form: the base optimizer is named in the key and its
  # parameters are supplied as a nested `params` mapping.
  # NOTE(review): nesting of `wd` under `params` is reconstructed from the
  # flattened source — verify both lr and wd belong to this optimizer.
  optimizer3($optimizers.adam):
    params:
      lr: 0.3
      wd: 0.1