Keras: Image segmentation using grayscale masks and the ImageDataGenerator class
I am currently trying to implement a convolutional network using Keras 2.1.6 (with TensorFlow as backend) and its ImageDataGenerator to segment an image using a grayscale mask. I try to use an image as input and a mask as label. Due to the low number of training images and memory constraints, I use the ImageDataGenerator class provided in Keras.
However, after changing the values provided in the Keras example to the ones described later, I get this error:
File "C:\Users\XXX\Anaconda3\lib\site-packages\keras\engine\training.py", line 2223, in fit_generator
batch_size = x.shape[0]
AttributeError: 'tuple' object has no attribute 'shape'
As far as I know, this happens because the generator yields a tuple, not an array. The error first appeared after I changed the following parameters from the standard values provided in the Keras example: color_mode='grayscale' for all mask generators, and class_mode='input', since this is recommended for autoencoders.
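A quick way to see this (a diagnostic check added here for illustration, using the trgen generator defined in my code below): with class_mode='input' each generator yields an (x, x) tuple, so zipping two such generators produces nested tuples instead of arrays.

batch_x, batch_y = next(trgen)
print(type(batch_x))  # tuple, not numpy.ndarray -- fit_generator then fails on x.shape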
The Keras example can be found here.
The dataset I am using consists of 100 images (jpg) and 100 corresponding grayscale masks (png), and can be downloaded at this link.
The architecture I want to implement is an autoencoder/U-Net based network; it is shown in the provided code:
from keras.preprocessing import image
from keras.models import Model
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras import initializers
image_path =
mask_path =
valid_image_path =
valid_mask_path =
img_size=160
batchsize=10
samplesize = 60
steps = samplesize / batchsize
train_datagen = image.ImageDataGenerator(shear_range=0.2,
                                         zoom_range=0.2,
                                         horizontal_flip=True)

data_gen_args = dict(rotation_range=90,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
seed = 1
image_generator = image_datagen.flow_from_directory(
    image_path,
    target_size=(img_size, img_size),
    class_mode='input',
    batch_size=batchsize,
    seed=seed)

mask_generator = mask_datagen.flow_from_directory(
    mask_path,
    target_size=(img_size, img_size),
    class_mode='input',
    color_mode='grayscale',
    batch_size=batchsize,
    seed=seed)

vimage_generator = image_datagen.flow_from_directory(
    valid_image_path,
    target_size=(img_size, img_size),
    class_mode='input',
    batch_size=batchsize,
    seed=seed)

vmask_generator = mask_datagen.flow_from_directory(
    valid_mask_path,
    target_size=(img_size, img_size),
    class_mode='input',
    color_mode='grayscale',
    batch_size=batchsize,
    seed=seed)
#Model
input_img = Input(shape=(img_size,img_size,3))
c11 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(input_img)
mp1 = MaxPooling2D((2, 2), padding='same')(c11)
c21 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(mp1)
mp2 = MaxPooling2D((2, 2), padding='same')(c21)
c31 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(mp2)
encoded = MaxPooling2D((5, 5), padding='same')(c31)
c12 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(encoded)
us12 = UpSampling2D((5,5))(c12)
c22 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(us12)
us22 = UpSampling2D((2, 2))(c22)
c32 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(us22)
us32 = UpSampling2D((2, 2))(c32)
decoded = Conv2D(1, (3, 3), activation='softmax', padding='same')(us32)
model = Model(input_img, decoded)
model.compile(loss="mean_squared_error", optimizer=optimizers.Adam(),metrics=["accuracy"])
#model.summary()
#Generators, tr: training, v: validation
trgen = zip(image_generator,mask_generator)
vgen = zip(vimage_generator,vmask_generator)
model.fit_generator(
    trgen,
    steps_per_epoch=steps,
    epochs=5,
    validation_data=vgen,
    validation_steps=10)
Here is a better version of a U-Net; you can use this code:
from keras.models import Model
from keras.layers import (Input, Conv2D, Conv2DTranspose, MaxPooling2D,
                          concatenate, BatchNormalization, Activation, Dropout)

def conv_block(tensor, nfilters, size=3, padding='same', initializer="he_normal"):
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

def deconv_block(tensor, residual, nfilters, size=3, padding='same', strides=(2, 2)):
    y = Conv2DTranspose(nfilters, kernel_size=(size, size), strides=strides, padding=padding)(tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, nfilters)
    return y

def Unet(img_height, img_width, nclasses=3, filters=64):
    # down
    input_layer = Input(shape=(img_height, img_width, 3), name='image_input')
    conv1 = conv_block(input_layer, nfilters=filters)
    conv1_out = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_block(conv1_out, nfilters=filters*2)
    conv2_out = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_block(conv2_out, nfilters=filters*4)
    conv3_out = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_block(conv3_out, nfilters=filters*8)
    conv4_out = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv4_out = Dropout(0.5)(conv4_out)
    conv5 = conv_block(conv4_out, nfilters=filters*16)
    conv5 = Dropout(0.5)(conv5)
    # up
    deconv6 = deconv_block(conv5, residual=conv4, nfilters=filters*8)
    deconv6 = Dropout(0.5)(deconv6)
    deconv7 = deconv_block(deconv6, residual=conv3, nfilters=filters*4)
    deconv7 = Dropout(0.5)(deconv7)
    deconv8 = deconv_block(deconv7, residual=conv2, nfilters=filters*2)
    deconv9 = deconv_block(deconv8, residual=conv1, nfilters=filters)
    # output
    output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
    output_layer = BatchNormalization()(output_layer)
    output_layer = Activation('softmax')(output_layer)

    model = Model(inputs=input_layer, outputs=output_layer, name='Unet')
    return model
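As a quick sanity check (a hypothetical usage example; the 256x256 size is arbitrary), the model should build and restore the input resolution at the output, since the four 2x2 poolings are undone by the four 2x2 transposed convolutions:

unet = Unet(256, 256, nclasses=3, filters=64)
print(unet.output_shape)  # expected: (None, 256, 256, 3)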
Note that if you have only two classes, i.e. nclasses=2, you need to change
output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
output_layer = BatchNormalization()(output_layer)
output_layer = Activation('softmax')(output_layer)
to
output_layer = Conv2D(filters=2, kernel_size=(1, 1))(deconv9)
output_layer = BatchNormalization()(output_layer)
output_layer = Activation('sigmoid')(output_layer)
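With a sigmoid output the loss should match as well; a minimal sketch (my suggestion, not part of the snippet above) compiles the two-class variant with binary cross-entropy:

# assuming `model` is the two-class (sigmoid) variant built above
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])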
Now for the data generators: you can use the built-in ImageDataGenerator class. Here is the code from the Keras docs:
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
                     featurewise_std_normalization=True,
                     rotation_range=90,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

# Provide the same seed and keyword arguments to the fit and flow methods
# (`images` and `masks` are numpy arrays of sample data, needed here to
# compute the statistics used by featurewise normalization)
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)

image_generator = image_datagen.flow_from_directory(
    'data/images',
    class_mode=None,
    seed=seed)

mask_generator = mask_datagen.flow_from_directory(
    'data/masks',
    class_mode=None,
    seed=seed)

# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)

model.fit_generator(
    train_generator,
    steps_per_epoch=2000,
    epochs=50)
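Note that class_mode=None is the crucial difference from your setup: each generator then yields a plain numpy array instead of an (x, x) tuple, so the zipped generator produces the (inputs, targets) pairs that fit_generator expects. A quick check (an illustrative addition, reusing the names above):

x_batch, y_batch = next(train_generator)
print(x_batch.shape, y_batch.shape)  # two plain arrays, no nested tuples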
Another approach that works is to implement your own generator by extending the Sequence class from Keras:
import numpy as np
from keras.preprocessing import image
from keras.utils import Sequence

class seg_gen(Sequence):
    def __init__(self, x_set, y_set, batch_size, image_dir, mask_dir):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.samples = len(self.x)
        self.image_dir = image_dir
        self.mask_dir = mask_dir

    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        # draw a random batch of indices (note: this ignores the `idx` argument)
        idx = np.random.randint(0, self.samples, self.batch_size)
        batch_x, batch_y = [], []
        for i in idx:
            # img_height and img_width are assumed to be defined globally
            _image = image.img_to_array(image.load_img(f'{self.image_dir}/{self.x[i]}', target_size=(img_height, img_width)))/255.
            mask = image.img_to_array(image.load_img(f'{self.mask_dir}/{self.y[i]}', grayscale=True, target_size=(img_height, img_width)))
            # mask = np.resize(mask,(img_height*img_width, classes))
            batch_y.append(mask)
            batch_x.append(_image)
        return np.array(batch_x), np.array(batch_y)
Here is sample code to train the model:
from keras.utils import multi_gpu_model
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping

unet = Unet(256, 256, nclasses=66, filters=64)
print(unet.output_shape)
p_unet = multi_gpu_model(unet, 4)
p_unet.load_weights('models-dr/top_weights.h5')
p_unet.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

tb = TensorBoard(log_dir='logs', write_graph=True)
mc = ModelCheckpoint(mode='max', filepath='models-dr/top_weights.h5', monitor='acc', save_best_only=True, save_weights_only=True, verbose=1)
es = EarlyStopping(mode='max', monitor='acc', patience=6, verbose=1)
callbacks = [tb, mc, es]

# image_list / mask_list hold the filenames; image_dir / mask_dir the corresponding folders
train_gen = seg_gen(image_list, mask_list, batch_size, image_dir, mask_dir)
p_unet.fit_generator(train_gen, steps_per_epoch=steps, epochs=13, callbacks=callbacks, workers=8)
When I had only two classes, I used the dice loss and got decent results. Here is the code for it:
from keras import backend as K

def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score

def dice_loss(y_true, y_pred):
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss
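To use it, pass the function objects directly to compile; a minimal sketch (assuming `model` is the two-class sigmoid variant from above):

model.compile(optimizer='adam', loss=dice_loss, metrics=[dice_coeff])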