import tensorflow as tf
import tensorflow.keras
print(tensorflow.keras.__version__)  # confirm the Keras version bundled with TensorFlow
from tensorflow.keras import models
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt
model = load_model('E:/zbx_code/plantimg.h5')
model.summary() # As a reminder.
img_path = 'E:/plant_img/resize/orig_plant_img_percent70_resize_224/potato_resize_224/1.jpg'
# We preprocess the image into a 4D tensor
img = image.load_img(img_path, target_size=(224, 224))
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
# Remember that the model was trained on inputs
# that were preprocessed in the following way:
img_tensor /= 255.
# Its shape is (1, 224, 224, 3)
print(img_tensor.shape)
# plt.imshow(img_tensor[0])
# plt.show()
# Extracts the outputs of the top 8 layers:
layer_outputs = [layer.output for layer in model.layers[:8]]
# Creates a model that will return these outputs, given the model input:
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
# This will return a list of 8 Numpy arrays:
# one array per layer activation
activations = activation_model.predict(img_tensor)
# first_layer_activation = activations[0]
# print(first_layer_activation.shape)
# # plt.matshow(first_layer_activation[0, :, :, 3], cmap='viridis')
# # plt.show()
# plt.matshow(first_layer_activation[0, :, :, 2], cmap='viridis')
# plt.show()
# These are the names of the layers, so can have them as part of our plot
layer_names = []
for layer in model.layers[:8]:
    layer_names.append(layer.name)
images_per_row = 16
# Now let's display our feature maps
for layer_name, layer_activation in zip(layer_names, activations):
    # This is the number of features in the feature map
    n_features = layer_activation.shape[-1]
    # The feature map has shape (1, size, size, n_features)
    size = layer_activation.shape[1]
    # We will tile the activation channels in this matrix
    n_cols = n_features // images_per_row
    display_grid = np.zeros((size * n_cols, images_per_row * size))
    print(type(display_grid))
    # We'll tile each filter into this big horizontal grid
    for col in range(n_cols):
        for row in range(images_per_row):
            channel_image = layer_activation[0, :, :, col * images_per_row + row]
            # Post-process the feature to make it visually palatable
            channel_image -= channel_image.mean()
            channel_image /= channel_image.std()
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            print(channel_image)
            display_grid[col * size : (col + 1) * size,
                         row * size : (row + 1) * size] = channel_image
    # print(display_grid.shape[0])
    # Display the grid
    scale = 1. / size
    # print(scale*display_grid[0])
    plt.figure(figsize=(int(scale * display_grid.shape[1]),
                        int(scale * display_grid.shape[1])))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='equal', cmap='viridis')
    plt.savefig('E:/featureimg/' + layer_name + '.jpg')
    plt.show()
While testing this code, the following issues came up:
(1) The original code used plt.figure(figsize=(int(scale * display_grid.shape[1]), int(scale * display_grid.shape[0]))). With my images this line raised an error; changing it to plt.figure(figsize=(int(scale * display_grid.shape[1]), int(scale * display_grid.shape[1]))) made the error go away.
Traceback (most recent call last):
  File "E:\zbx_code\预训练\feature_vision.py", line 84, in <module>
    int(scale * display_grid.shape[0])))
  File "D:\anaconda3\envs\tensorflow\lib\site-packages\matplotlib\pyplot.py", line 546, in figure
    **kwargs)
  File "D:\anaconda3\envs\tensorflow\lib\site-packages\matplotlib\backend_bases.py", line 3357, in new_figure_manager
    fig = fig_cls(*args, **kwargs)
  File "D:\anaconda3\envs\tensorflow\lib\site-packages\matplotlib\figure.py", line 349, in __init__
    raise ValueError('figure size must be positive finite not '
ValueError: figure size must be positive finite not (16, 0)
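The (16, 0) figure size suggests the real cause is upstream of plt.figure: for a layer whose activation has fewer than images_per_row channels, n_features // images_per_row is 0, so display_grid is created with zero rows and the figure height scale * display_grid.shape[0] evaluates to 0. Reusing shape[1] for both dimensions silences the error but still leaves that layer's grid empty. A minimal sketch of an alternative fix, assuming the same layer_activation and images_per_row as above (the build_display_grid helper name and the +1e-5 guard are my additions, not from the original code), rounds the column count up and skips channels that do not exist:

import math
import numpy as np

def build_display_grid(layer_activation, images_per_row=16):
    """Tile all channels of a (1, size, size, n_features) activation into one 2D grid."""
    n_features = layer_activation.shape[-1]
    size = layer_activation.shape[1]
    # Round up instead of down so layers with fewer than images_per_row
    # channels still get one row of tiles (avoids a zero-height grid).
    n_cols = math.ceil(n_features / images_per_row)
    display_grid = np.zeros((size * n_cols, images_per_row * size))
    for col in range(n_cols):
        for row in range(images_per_row):
            channel_index = col * images_per_row + row
            if channel_index >= n_features:
                break  # leave the remaining tiles in the last row black
            channel_image = layer_activation[0, :, :, channel_index].copy()
            channel_image -= channel_image.mean()
            channel_image /= (channel_image.std() + 1e-5)  # guard against an all-constant channel
            channel_image = np.clip(channel_image * 64 + 128, 0, 255).astype('uint8')
            display_grid[col * size:(col + 1) * size,
                         row * size:(row + 1) * size] = channel_image
    return display_grid

With a grid built this way, the original call plt.figure(figsize=(int(scale * display_grid.shape[1]), int(scale * display_grid.shape[0]))) should work unmodified, because shape[0] can no longer be zero.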
(2) In plt.imshow(display_grid, aspect='equal', cmap='viridis'), aspect controls the aspect ratio: with 'equal' the pixels are drawn square, so every channel tile appears at the same size, while 'auto' stretches the image to fill the axes and the width/height ratio changes. cmap selects the colormap; 'viridis' is a perceptually uniform colormap that runs from dark blue through green to yellow (not a red-green-blue map).
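If you want to see how these settings change the output, a small sketch (assuming display_grid and layer_name are the variables from the loop above; the choice of alternative colormaps here is just an example) renders the same grid with a few combinations:

import matplotlib.pyplot as plt

# Compare a few colormap choices on the same activation grid.
for cmap_name in ['viridis', 'gray', 'jet']:
    plt.figure(figsize=(12, 3))
    plt.title(layer_name + ' (' + cmap_name + ')')
    plt.grid(False)
    # aspect='equal' keeps pixels square; aspect='auto' would stretch the
    # grid to fill the axes, changing the width/height ratio.
    plt.imshow(display_grid, aspect='equal', cmap=cmap_name)
    plt.colorbar()
    plt.show()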