線形回帰 解析
GradientDescentOptimizerのlearning_rateを変えてみる
# Gradient descent optimizer (TF 1.x `tf.train` API).
# NOTE(review): `tf` and `loss` are defined in the elided surrounding script.
learning_rate = 0.01
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
...
# Number of training iterations
training_step = 5000
# presumably: run validation/logging every 10 steps — confirm against the training loop
validation_step = 10
AdamOptimizer
# Optimizer: Adam with its default hyperparameters (no explicit learning rate).
# NOTE(review): `tf` and `loss` are defined in the elided surrounding script.
train_op = tf.train.AdamOptimizer().minimize(loss)
...
# Number of training iterations
training_step = 10000
# presumably: run validation/logging every 10 steps — confirm against the training loop
validation_step = 10
AdadeltaOptimizer
# Optimizer: Adadelta with an explicit learning rate
# (larger than the other experiments, since Adadelta's effective step adapts per parameter).
# NOTE(review): `tf` and `loss` are defined in the elided surrounding script.
learning_rate = 0.5
train_op = tf.train.AdadeltaOptimizer(learning_rate).minimize(loss)
...
# Number of training iterations
training_step = 10000
# presumably: run validation/logging every 10 steps — confirm against the training loop
validation_step = 10
AdagradOptimizer
# Optimizer: Adagrad with an explicit learning rate.
# NOTE(review): `tf` and `loss` are defined in the elided surrounding script.
learning_rate = 0.025
train_op = tf.train.AdagradOptimizer(learning_rate).minimize(loss)
...
# Number of training iterations
training_step = 10000
# presumably: run validation/logging every 10 steps — confirm against the training loop
validation_step = 10
MomentumOptimizer
# Optimizer: SGD with momentum; takes both a learning rate and a momentum coefficient.
# NOTE(review): `tf` and `loss` are defined in the elided surrounding script.
learning_rate = 0.01
momentum_rate = 0.01
train_op = tf.train.MomentumOptimizer(learning_rate, momentum_rate).minimize(loss)
...
# Number of training iterations
training_step = 10000
# presumably: run validation/logging every 10 steps — confirm against the training loop
validation_step = 10