Error in number of inputs when using CustomDataGenerator with Keras model


I am trying to create a Keras model that takes two separate inputs at different stages: an image first, with two coordinates concatenated in at the fully-connected layers.

When I run my code, I get the following error:

"ValueError: Layer "model" expects 2 input(s), but it received 3 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, None, None) dtype=uint8>, <tf.Tensor 'IteratorGetNext:1' shape=() dtype=string>, <tf.Tensor 'IteratorGetNext:2' shape=() dtype=string>]"

Here is the code that gives me this error:

from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.layers import Dense, Flatten, Input, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model
from tensorflow.keras.utils import Sequence
import random
import pydot
import graphviz
import csv
import cv2

class CustomDataGenerator(Sequence):
    def __init__(self, image_filenames, coordinates, labels, batch_size):
        #Get items in a list format
        self.image_filenames = image_filenames
        self.coordinates = coordinates
        self.labels = labels
        self.batch_size = batch_size

    def __len__(self):
        return len(self.image_filenames) // self.batch_size

    def __getitem__(self, index):
        batch_image_filenames = self.image_filenames[index * self.batch_size : (index + 1) * self.batch_size]
        batch_coordinates = self.coordinates[index * self.batch_size : (index + 1) * self.batch_size]
        batch_labels = self.labels[index * self.batch_size : (index + 1) * self.batch_size]

        batch_images = [cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB) for filename in batch_image_filenames]
        batch_coordinates = [coordinate for coordinate in batch_coordinates]

        return [batch_images, batch_coordinates], batch_labels

# Load the pre-trained Inception V3 model without the top (fully connected) layers
base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=(299, 299, 3))

# Create separate input layers for the scalar coordinates
input_coordinates = Input(shape=(2,))

# Flatten the output from the Inception V3 model and concatenate it with the input_coordinates
flat_output = base_model.output
flat_output = Flatten()(flat_output)
concatenated = concatenate([flat_output, input_coordinates])

num_classes = 3

# Add additional fully-connected layers
fc1 = Dense(512, activation='relu')(concatenated)
fc2 = Dense(256, activation='relu')(fc1)
output = Dense(num_classes, activation='softmax')(fc2)

# Create the final model
model = Model(inputs=[base_model.input, input_coordinates], outputs=output)

# Compile the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Print the model summary
model.summary()

# Note: names, coord, and labels are defined elsewhere and work as intended; trimmed for a minimal reproducible example
train_image_filenames = names[0: int(len(names)*0.8)]
val_image_filenames = names[int(len(names)*0.8):]
train_coordinates = coord[0: int(len(coord)*0.8)]
val_coordinates = coord[int(len(coord)*0.8):]
train_labels = labels[0: int(len(labels)*0.8)]
val_labels = labels[int(len(labels)*0.8):]
batch_size = 1


train_generator = CustomDataGenerator(train_image_filenames, train_coordinates, train_labels, batch_size)
val_generator = CustomDataGenerator(val_image_filenames, val_coordinates, val_labels, batch_size)

model.fit(train_generator, epochs=10, validation_data=val_generator)

Any idea what I may be doing wrong here? Thanks.

1 Answer

Answer by ROS:

The problem is resolved by casting each of the returned components (batch_images, batch_coordinates, and batch_labels) to a NumPy array before returning them from __getitem__. Plain Python lists are flattened element by element by Keras's data adapter, so [batch_images, batch_coordinates] is not treated as exactly two inputs; a NumPy array, by contrast, becomes a single tensor.
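
A minimal sketch of the corrected __getitem__ along those lines (assumptions: the coordinates and labels are numeric values, or numeric strings that NumPy can parse; the cv2.resize call is an addition here so every image has the 299x299 shape the model expects and the list can be stacked into one array):

import numpy as np

class CustomDataGenerator(Sequence):
    # __init__ and __len__ are unchanged from the question

    def __getitem__(self, index):
        batch_image_filenames = self.image_filenames[index * self.batch_size : (index + 1) * self.batch_size]
        batch_coordinates = self.coordinates[index * self.batch_size : (index + 1) * self.batch_size]
        batch_labels = self.labels[index * self.batch_size : (index + 1) * self.batch_size]

        # Read, convert to RGB, and resize so the images share one shape
        # and stack into a single (batch, 299, 299, 3) uint8 array
        batch_images = np.array([
            cv2.resize(cv2.cvtColor(cv2.imread(f), cv2.COLOR_BGR2RGB), (299, 299))
            for f in batch_image_filenames
        ])

        # np.array with dtype=np.float32 also parses numeric strings, so
        # coordinates read from a CSV as text get converted in the same step
        batch_coordinates = np.array(batch_coordinates, dtype=np.float32)
        batch_labels = np.array(batch_labels, dtype=np.float32)

        # Each NumPy array now maps to exactly one of the model's two inputs
        return [batch_images, batch_coordinates], batch_labels

This would also explain the exact shape of the error: with batch_size=1 and string-valued coordinates, flattening the nested lists yields one uint8 image tensor plus two scalar string tensors, which is precisely the three inputs the message reports.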