# python人工智能tensorflow优化器Optimizer算法汇总

## tensorflow常见的Optimizer

### 1 梯度下降法

`tf.train.GradientDescentOptimizer(learning_rate, use_locking=False, name='GradientDescent')`

### 2 Adagrad算法

`tf.train.AdagradOptimizer(learning_rate, initial_accumulator_value=0.1, use_locking=False,name='Adagrad')`

### 3 动量优化法

`tf.train.MomentumOptimizer.__init__(learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False)`

### 4 RMSProp算法

`tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp')`

RMSProp算法在经验上已经被证明是一种有效且实用的深度神经网络优化算法。目前它是深度学习从业者经常采用的优化方法之一。

### 5 Adam算法

`tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam')`

## 例子

```import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data",one_hot = "true")
def add_layer(inputs,in_size,out_size,n_layer,activation_function = None):
layer_name = 'layer%s'%n_layer
with tf.name_scope(layer_name):
with tf.name_scope("Weights"):
Weights = tf.Variable(tf.random_normal([in_size,out_size]),name = "Weights")
tf.summary.histogram(layer_name+"/weights",Weights)
with tf.name_scope("biases"):
biases = tf.Variable(tf.zeros([1,out_size]) + 0.1,name = "biases")
tf.summary.histogram(layer_name+"/biases",biases)
with tf.name_scope("Wx_plus_b"):
Wx_plus_b = tf.matmul(inputs,Weights) + biases
tf.summary.histogram(layer_name+"/Wx_plus_b",Wx_plus_b)
if activation_function == None :
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
tf.summary.histogram(layer_name+"/outputs",outputs)
return outputs
def compute_accuracy(x_data,y_data):
global prediction
y_pre = sess.run(prediction,feed_dict={xs:x_data})
correct_prediction = tf.equal(tf.arg_max(y_data,1),tf.arg_max(y_pre,1))     #判断是否相等
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))   #赋予float32数据类型，求平均。
result = sess.run(accuracy,feed_dict = {xs:batch_xs,ys:batch_ys})   #执行
return result
xs = tf.placeholder(tf.float32,[None,784])
ys = tf.placeholder(tf.float32,[None,10])
layer1 = add_layer(xs,784,150,"layer1",activation_function = tf.nn.tanh)
with tf.name_scope("loss"):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys,logits = prediction),name = 'loss')
#label是标签，logits是预测值，交叉熵。
tf.summary.scalar("loss",loss)
init = tf.initialize_all_variables()
merged = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(init)
write = tf.summary.FileWriter("logs/",sess.graph)
for i in range(5001):
batch_xs,batch_ys = mnist.train.next_batch(100)
sess.run(train,feed_dict = {xs:batch_xs,ys:batch_ys})
if i % 1000 == 0:
print("训练%d次的识别率为：%f。"%((i+1),compute_accuracy(mnist.test.images,mnist.test.labels)))
result = sess.run(merged,feed_dict={xs:batch_xs,ys:batch_ys})
```

`train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)`

### 1 梯度下降法

`train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)`

### 2 Adagrad算法

`train = tf.train.AdagradOptimizer(0.1).minimize(loss)`

### 3 动量优化法

`train = tf.train.MomentumOptimizer(learning_rate=0.05, momentum=0.9).minimize(loss)`

### 4 RMSProp算法

`train = tf.train.RMSPropOptimizer(0.01).minimize(loss)`

### 5 Adam算法

`train = tf.train.AdamOptimizer(0.004).minimize(loss)`