# tensorflow学习5 -- CNN 图像分类

``````import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Flatten,Conv2D,MaxPool2D
from tensorflow.keras.datasets import mnist
# 所有图片整成28行28列单通道。再归一化
x_train,x_test=x_train.reshape([-1,28,28,1])/255.0,x_test.reshape([-1,28,28,1])/255.0
#### contruct the model
model=Sequential()
#### compile ; fit ; evaluate
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(x=x_train,y=y_train,batch_size=100,epochs=20,verbose=2)
model.evaluate(x=x_test,y=y_test,verbose=2)
``````

TensorFlow2—20行代码实现CNN分类图片的例子

# 先要把数据换成自己的。

## 先看看别人的数据是啥样的

``````print(x_train[0,20]) # inspect raw pixel values: row 20 of image 0
import matplotlib.pyplot as plt
plt.imshow(x_train[0])  # display image 0 as a picture
plt.show()
``````

## 读入文件夹

``````for i in os.listdir('C:/Users/WQuiet/Desktop/TFphoto/'):
fulldirct = os.path.join('C:/Users/WQuiet/Desktop/TFphoto/',i)
print(fulldirct)
``````

listdir 需要传入一个有效的目录路径（相对或绝对都可以）；注意从资源管理器里复制来的路径形如 C:\Users\WQuiet\Desktop\TFphoto\1，其中的反斜杠在 Python 字符串里需要转义（写成 \\）或直接改用正斜杠 /。

### 总之下面这个是能正确读入的

``````def read_img(path):
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
print(cate)# ['C:/Users/WQuiet/Desktop/TFphoto/0', 'C:/Users/WQuiet/Desktop/TFphoto/1']

``````

python中os.path.isdir()等函数的作用和用法

### 然后看一下读入的图片

``````for idx,folder in enumerate(cate):
for im in glob.glob(folder + '/*.bmp'):
print('images: %s' % (im))
plt.imshow(img)
plt.show()
``````

python中os.listdir( )函数读取文件夹

``````img=cv2.imread(im,cv2.IMREAD_GRAYSCALE)  # read the file as a single-channel grayscale image
print(img)  # raw pixel matrix
plt.imshow(img)
plt.show()
``````

python3读取图片并灰度化图片的四种方法(OpenCV、PIL.Image、TensorFlow方法)总结

【一集罗小黑战记之后】

plt.imshow(img,cmap=“gray”)能设定为按灰度图显示，不过如果img本身不是灰度图而是三通道图片的话，显示的还是彩色图。

``````def read_img(path):
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
# print(cate)# ['C:/Users/WQuiet/Desktop/TFphoto/0', 'C:/Users/WQuiet/Desktop/TFphoto/1']
for idx,folder in enumerate(cate):
for im in glob.glob(folder + '/*.bmp'):
print('images: %s' % (im))
# img = img[:, :, (2, 1, 0)] # RGB图像按BGR读入
print(img)
plt.imshow(img,cmap="gray")
plt.show()
``````

Python读取图像并显示灰度图

### 压缩图片大小

img=cv2.resize(img,(32,32))

img=img[180:350 , 300:550]

### 挨个保存并返回数据

``````def read_img(path):
imgs=[]
labels=[]
fpath=[]
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
for idx,folder in enumerate(cate):
for im in glob.glob(folder + '/*.bmp'):
img=img[180:350 , 300:550]# 切出ROI区域
img=cv2.resize(img,(32,32))# 压缩图片
imgs.append(img)
labels.append(idx) # 文件夹名就是分类标记 我记为0和1
fpath.append( im)# C:/Users/WQuiet/Desktop/TFphoto/0\13.bmp
return np.asarray(fpath, np.string_), np.asarray(imgs, np.uint8), np.asarray(labels, np.int8)

``````

### 划分数据

``````
print(data.shape)  # (122, 32, 32)
num_classes =2 # len(set(label))#计算有多少类图片: 一堆的0和1  放在set里，自动去重并且排序 但没必要，我知道只有两类，写个2省时间

num_example=data.shape[0]
arr = np.arange(num_example)# 把图片数量变成 [0,1……,总数]
np.random.shuffle(arr)# 打乱排序
data = data[arr]
label = label[arr]
fpaths = fpaths[arr]

# 80%训练集 20%测试集
ratio=0.8
s=np.int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
fpaths_train = fpaths[:s]
x_val = data[s:]
y_val = label[s:]
fpaths_test = fpaths[s:]
print(len(x_train),len(y_train),len(x_val),len(y_val))#97 97 25 25

``````

## 可以移花接木了

x_train,y_train,x_test,y_test 数据换成自己的之后。再把构建网络的第一层输入形状改成自己的图片大小和最后一层输出的种类数量改掉就行。

``````import glob
import os
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Flatten,Conv2D,MaxPool2D
import matplotlib.pyplot as plt

imgs=[]
labels=[]
fpath=[]
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
for idx,folder in enumerate(cate):
for im in glob.glob(folder + '/*.bmp'):
img=img[180:350 , 300:550]# 切出ROI区域
img=cv2.resize(img,(32,32))# 压缩图片
imgs.append(img)
labels.append(idx) #文件夹名就是分类标记 我记为0和1
fpath.append( im)# C:/Users/WQuiet/Desktop/TFphoto/0\13.bmp
return np.asarray(fpath, np.string_), np.asarray(imgs, np.uint8), np.asarray(labels, np.int8)

#print(data.shape)  # (122, 32, 32)
num_classes =2 # len(set(label))#计算有多少类图片: 一堆的0和1  放在set里，自动去重并且排序 但没必要，我知道只有两类，写个2省时间

num_example=data.shape[0]
arr = np.arange(num_example)# 把图片数量变成 [0,1……,总数]
np.random.shuffle(arr)# 打乱排序
data = data[arr]
label = label[arr]
fpaths = fpaths[arr]

# 80%训练集 20%测试集
ratio=0.8
s=np.int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
fpaths_train = fpaths[:s]
x_test = data[s:]
y_test = label[s:]
fpaths_test = fpaths[s:]
#print(len(x_train),len(y_train),len(x_test),len(y_test))#97 97 25 25

x_train,x_test=x_train.reshape([-1,32, 32,1])/255.0,x_test.reshape([-1,32, 32,1])/255.0
#### contruct the model
model=Sequential()
#### compile ; fit ; evaluate
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(x=x_train,y=y_train,batch_size=100,epochs=20,verbose=2)
model.evaluate(x=x_test,y=y_test,verbose=2)

``````

Epoch 1/20
1/1 - 0s - loss: 0.7157 - accuracy: 0.4227
Epoch 2/20
1/1 - 0s - loss: 0.6620 - accuracy: 0.5773
Epoch 3/20
1/1 - 0s - loss: 0.6217 - accuracy: 0.5773
Epoch 4/20
1/1 - 0s - loss: 0.5918 - accuracy: 0.5773
Epoch 5/20
1/1 - 0s - loss: 0.5684 - accuracy: 0.5773
Epoch 6/20
1/1 - 0s - loss: 0.5491 - accuracy: 0.5773
Epoch 7/20
1/1 - 0s - loss: 0.5322 - accuracy: 0.5773
Epoch 8/20
1/1 - 0s - loss: 0.5159 - accuracy: 0.5773
Epoch 9/20
1/1 - 0s - loss: 0.4978 - accuracy: 0.5773
Epoch 10/20
1/1 - 0s - loss: 0.4771 - accuracy: 0.5773
Epoch 11/20
1/1 - 0s - loss: 0.4533 - accuracy: 0.5773
Epoch 12/20
1/1 - 0s - loss: 0.4274 - accuracy: 0.5979
Epoch 13/20
1/1 - 0s - loss: 0.4001 - accuracy: 0.7423
Epoch 14/20
1/1 - 0s - loss: 0.3725 - accuracy: 0.9175
Epoch 15/20
1/1 - 0s - loss: 0.3451 - accuracy: 0.9794
Epoch 16/20
1/1 - 0s - loss: 0.3180 - accuracy: 0.9794
Epoch 17/20
1/1 - 0s - loss: 0.2908 - accuracy: 0.9897
Epoch 18/20
1/1 - 0s - loss: 0.2630 - accuracy: 0.9794
Epoch 19/20
1/1 - 0s - loss: 0.2347 - accuracy: 0.9794
Epoch 20/20
1/1 - 0s - loss: 0.2070 - accuracy: 0.9897

1/1 - 0s - loss: 0.2029 - accuracy: 1.0000

## 怎么看它识别的结果呢…

TF官网入门教程

### 绘制可视化窗口

``````class_names = ['bright', 'extinguish']# 写俩标签代替0和1
#输入 序号,预测概率,真实标签,小图片
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])

plt.imshow(img, cmap=plt.cm.binary)

predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
# 预测对了用蓝色,错了用红色
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
# 括号里是真实标签
``````

### 进行预测

``````probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])  # append Softmax so the logits become probabilities

predictions = probability_model.predict(x_test)
``````

predictions[0]是第一张图片的预测概率,print出来是[0.1353 0.8647]这样的

### 可视化

``````num_rows = 5
num_cols = 5
num_images = num_rows*num_cols
plt.figure(figsize=(2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, num_cols, i+1)
plot_image(i, predictions[i], y_test, x_test) # 子函数
plt.tight_layout()
plt.show()
``````

### 效果不错

``````class_names = ['bright', 'extinguish']
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])

plt.imshow(img, cmap=plt.cm.binary)

predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'

plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)

probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])

predictions = probability_model.predict(x_test)

num_rows = 5
num_cols = 5
num_images = num_rows*num_cols
plt.figure(figsize=(2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, num_cols, i+1)
plot_image(i, predictions[i], y_test, x_test)
plt.tight_layout()
plt.show()

``````

Epoch 20/20
1/1 - 0s - loss: 0.1488 - accuracy: 0.9897
1/1 - 0s - loss: 0.1388 - accuracy: 0.9600

# 保存模型

## 保存

save()既保持了模型的图结构，又保存了模型的参数。
save_weights()只保存了模型的参数，但并没有保存模型的图结构。所以它的size小很多,但读取模型的时候不仅要自己再写一遍一模一样的模型结构,还要写一模一样的原始参数
keras保存模型中的save()和save_weights()

``````model.save('saved_model/my_model')  # saves architecture + weights (SavedModel format)
``````

## 加载

### 替代模型建立的部分就好

``````new_model = tf.keras.models.load_model('saved_model/my_model')
loss, acc = new_model.evaluate(x=x_test,y=y_test,verbose=2)# evaluate accuracy of the restored model
``````

### 报错

ValueError: Input 0 of layer sequential is incompatible with the
layer: expected ndim=4, found ndim=3. Full shape received: [None, 32,
32]

Keras报错：ValueError: Input 0 is incompatible with layer sequential expected shape=(None, None, 22),
ValueError : 层 lstm 的输入 0 与层不兼容:预期 ndim=3，发现 ndim=2.收到的完整形状:[无，18]

``````x_test=np.expand_dims(x_test, 3)# insert a new axis at position 3: (N, 32, 32) -> (N, 32, 32, 1)
``````

numpy中expand_dims()函数详解

### 能用了

1/1 - 0s - loss: 0.0000e+00 - accuracy: 1.0000

``````import glob
import os
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Flatten,Conv2D,MaxPool2D
import matplotlib.pyplot as plt

imgs=[]
labels=[]
fpath=[]
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
for idx,folder in enumerate(cate):
for im in glob.glob(folder + '/*.bmp'):
img=img[180:350 , 300:550]# 切出ROI区域
img=cv2.resize(img,(32,32))# 压缩图片
imgs.append(img)
labels.append(idx) #文件夹名就是分类标记 我记为0和1
fpath.append( im)# C:/Users/WQuiet/Desktop/TFphoto/0\13.bmp
return np.asarray(fpath, np.string_), np.asarray(imgs, np.uint8), np.asarray(labels, np.int8)

#print(data.shape)  # (122, 32, 32)
num_classes =2 # len(set(label))#计算有多少类图片: 一堆的0和1  放在set里，自动去重并且排序 但没必要，我知道只有两类，写个2省时间

num_example=data.shape[0]
arr = np.arange(num_example)# 把图片数量变成 [0,1……,总数]
np.random.shuffle(arr)# 打乱排序
data = data[arr]
label = label[arr]
fpaths = fpaths[arr]

# 80%训练集 20%测试集
ratio=0.8
s=np.int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
fpaths_train = fpaths[:s]
x_test = data[s:]
y_test = label[s:]
fpaths_test = fpaths[s:]

x_test=np.expand_dims(x_test, 3)
print(x_test.shape , y_test.shape)

# x_train,x_test=x_train.reshape([-1,32, 32,1])/255.0,x_test.reshape([-1,32, 32,1])/255.0
# #### contruct the model
# model=Sequential()
# #### compile ; fit ; evaluate
# model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#               metrics=['accuracy'])
# model.fit(x=x_train,y=y_train,batch_size=100,epochs=20,verbose=2)
# model.evaluate(x=x_test,y=y_test,verbose=2)# 评估准确率
# model.save('saved_model/my_model')
#
loss, acc = new_model.evaluate(x=x_test,y=y_test,verbose=2)# 评估准确率
# print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))
#
# print(new_model.predict(x_test).shape)
#
# 进行预测
class_names = ['bright', 'extinguish']
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])

plt.imshow(img, cmap=plt.cm.binary)

predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'

plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)

probability_model = tf.keras.Sequential([new_model,
tf.keras.layers.Softmax()])

predictions = probability_model.predict(x_test)

num_rows = 5
num_cols = 5
num_images = num_rows*num_cols
plt.figure(figsize=(2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, num_cols, i+1)
plot_image(i, predictions[i], y_test, x_test)
plt.tight_layout()
plt.show()

``````

# 使用模型

``````import glob
import os
import numpy as np
import cv2
import tensorflow as tf

source = []
imgs=[]
fpath=[]
for im in glob.glob(path + '/*.bmp'):
source.append(img)
img=img[180:350 , 300:550]# 切出ROI区域
img=cv2.resize(img,(32,32))# 压缩图片
imgs.append(img)
fpath.append(im)# C:/Users/WQuiet/Desktop/TFphoto/0\13.bmp
return np.asarray(fpath, np.string_), np.asarray(imgs, np.uint8)

# print(fpaths)
data=np.expand_dims(data, 3)

class_names = ['bright', 'extinguish']

probability_model = tf.keras.Sequential([new_model,
tf.keras.layers.Softmax()])

figure_save_path = "bright"  # 这里创建了一个文件夹，如果依次创建不同文件夹，可以用name_list[i]
if not os.path.exists(figure_save_path):
os.makedirs(figure_save_path)  # 如果不存在目录figure_save_path，则创建
figure_save_path = "extinguish"  # 这里创建了一个文件夹，如果依次创建不同文件夹，可以用name_list[i]
if not os.path.exists(figure_save_path):
os.makedirs(figure_save_path)  # 如果不存在目录figure_save_path，则创建

predictions = probability_model.predict(data)
cunt = len(predictions[:, 0])
for i in range(cunt):
predicted_label = np.argmax(predictions[i])
# 指定图片保存路径
if predicted_label:
cv2.imwrite("E:/my_python_workplace/extinguish/"+str(i)+".bmp",source[i])
else:
cv2.imwrite("E:/my_python_workplace/bright/" + str(i) + ".bmp", source[i])

``````