1. Data augmentation function
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Ranges for the random transforms; each one is re-sampled for every image.
img_data_gen_args = dict(rotation_range=45,
                         width_shift_range=0.3,
                         height_shift_range=0.3,
                         shear_range=0.5,
                         zoom_range=0.3,
                         horizontal_flip=True,
                         vertical_flip=True)  # fill_mode='reflect' left disabled, as in the original

def dataagument(xtrain, ytrain, seed, batch_size):
    """Yield augmented (image, mask) batches indefinitely."""
    image_data_generator = ImageDataGenerator(**img_data_gen_args)
    train_generator = image_data_generator.flow(xtrain, ytrain, seed=seed,
                                                batch_size=batch_size)
    for img, mask in train_generator:
        yield img, mask
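For context on how these arguments behave: ImageDataGenerator has no per-transform probability setting. Every configured range (rotation, shifts, shear, zoom) is sampled anew for each image, and the two flips are applied with a fixed 50% chance each. A minimal sketch, assuming the img_data_gen_args dict above and an image array xtrain of shape (N, H, W, C), that inspects the random parameters drawn for one image (get_random_transform and apply_transform are part of the Keras ImageDataGenerator API in recent releases):

gen = ImageDataGenerator(**img_data_gen_args)

# Dictionary of random parameters drawn for a single image; expected keys
# include 'theta', 'tx', 'ty', 'shear', 'zx', 'zy', 'flip_horizontal', ...
params = gen.get_random_transform(xtrain[0].shape, seed=99)
print(params)

# Apply exactly those parameters to the image.
augmented = gen.apply_transform(xtrain[0], params)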

2. Model training

mytrain = dataagument(xtrain, ytrain, seed=99, batch_size=64)
steps_per_epoch = len(xtrain) // 64   # must be an integer when x is a generator
history = model.fit(mytrain, validation_data=(xval, yval),
                    steps_per_epoch=steps_per_epoch, epochs=epochs,
                    callbacks=callback_, verbose=2)  # shuffle is ignored for generator input
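Because mytrain is an infinite Python generator, Keras cannot infer the epoch length on its own: steps_per_epoch has to be an integer (the float len(xtrain)/64 may raise an error or be truncated, depending on the version), and the shuffle flag has no effect for generator input. A small sketch of the usual computation, assuming a batch size of 64; using ceil keeps a final partial batch as its own step:

import math

batch_size = 64
steps_per_epoch = math.ceil(len(xtrain) / batch_size)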

My question: if I don't set a probability for each augmentation, will all of the augmentations be applied to every image? I also found that augmenting with ImageDataGenerator gives higher accuracy than augmenting with a transform class like the one below.
import random

import cv2
import numpy as np


class RandomScale:
    """Randomly rescale a numpy image (and return its label) with probability p."""

    def __init__(self, scale_range=(0.9, 1.1), p=0.5):
        self.scale_range = scale_range
        self.p = p

    def __call__(self, data_numpy, label_numpy):
        if random.random() < self.p:
            # Draw a scale factor uniformly from scale_range.
            scale = np.random.uniform(self.scale_range[0], self.scale_range[1])
            # img_h, img_w, _ = data_numpy.shape
            img_h, img_w = 11, 11  # fixed image size used in the original code
            # Angle 0 means the affine matrix only rescales about the centre.
            M_rotate = cv2.getRotationMatrix2D((img_w / 2, img_h / 2), 0, scale)
            data_numpy = cv2.warpAffine(data_numpy, M_rotate, (img_w, img_h))
        # Return the (possibly unchanged) pair in both branches.
        return data_numpy, label_numpy
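For comparison, here is a minimal sketch of how such a transform class is typically plugged into a batch generator (the class_based_augment wrapper below is hypothetical, not from the original code). Note the difference in semantics: with p=0.5 roughly half of the samples pass through RandomScale unchanged each epoch, whereas ImageDataGenerator above draws fresh random parameters for every image, which is a much heavier augmentation policy.

import numpy as np

def class_based_augment(xtrain, ytrain, batch_size, transforms):
    """Yield batches in which every sample is passed through the transform list."""
    n = len(xtrain)
    while True:
        idx = np.random.permutation(n)
        for start in range(0, n - batch_size + 1, batch_size):
            imgs, masks = [], []
            for i in idx[start:start + batch_size]:
                img, mask = xtrain[i], ytrain[i]
                for t in transforms:
                    img, mask = t(img, mask)  # each transform applies with its own p
                imgs.append(img)
                masks.append(mask)
            yield np.stack(imgs), np.stack(masks)

# Roughly half of the samples are rescaled; the rest pass through untouched.
aug_train = class_based_augment(xtrain, ytrain, 64, [RandomScale((0.9, 1.1), p=0.5)])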