
I want to train a VGG19 on my own data for binary classification. My data consists of .jpg images and a pandas dataframe with 2 columns (id, the filename of the image, and T1, its label of 0 or 1). Training and saving the model works fine, but when I try to load it back I get an error. What could I do to solve this? In GitHub discussions people have been dealing with this since 2020 and I haven't found a solution that worked for this case; I have also tried saving in the .json and .h5 formats, but that didn't work either.

Python: 3.10
keras: 3.1.1
tensorflow: 2.16.2
OS: Ubuntu 22.04

For the model training and saving:

import os
import pandas as pd
import tensorflow as tf
from keras import losses
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.optimizers import Adam
from keras.metrics import Precision, Recall
from keras.applications.vgg19 import VGG19

IMG_HEIGHT = 224
IMG_WIDTH = 224
MODELS_INPUT_SHAPE = (IMG_HEIGHT, IMG_WIDTH, 3)
NUM_CLASSES = 2
EPOCHS = 1
BATCH_SIZE = 32

train_dataset = pd.read_csv('data/train-data.csv')
train_images_location = 'data/train-images/'

# Auxiliary function to prepare the images.
def preprocess_image(image):
    image = tf.io.read_file(image)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [224, 224])
    image = tf.cast(image, tf.float32) / 255.0  # Normalize pixel values to [0, 1]
    return image

train_image_paths = [os.path.join(train_images_location, img_path) for img_path in train_dataset['id']]
one_hot_labels = tf.one_hot(train_dataset['T1'], NUM_CLASSES)
train_dataset = tf.data.Dataset.from_tensor_slices((train_image_paths, one_hot_labels))
train_dataset = train_dataset.map(lambda x, y: (preprocess_image(x), y))
train_dataset = train_dataset.batch(BATCH_SIZE)

models = {
    'vgg19_binary.keras': VGG19(weights='imagenet', include_top=False, input_shape=MODELS_INPUT_SHAPE)
}

for model_name in models.keys():

  base_model = models[model_name]

  for layer in base_model.layers:
      layer.trainable = False

  model = Sequential()
  model.add(base_model)
  model.add(Flatten())
  model.add(Dense(256, activation='relu'))
  model.add(Dense(128, activation='relu'))
  model.add(Dense(64, activation='relu'))
  model.add(Dense(32, activation='relu'))
  model.add(Dense(16, activation='relu'))
  model.add(Dense(NUM_CLASSES, activation='sigmoid'))
  model.compile(loss=losses.BinaryCrossentropy(),
                optimizer=Adam(learning_rate=0.0001),
                metrics=['accuracy', Precision(), Recall()])

  model.fit(train_dataset, epochs=EPOCHS)

  model_location = 'models/'+model_name
  model.save(model_location)

For the model loading:

from keras.models import load_model

vgg19_model_location = 'models/vgg19_binary.keras'
vgg19_model = load_model(vgg19_model_location)

And the full error:

Traceback (most recent call last):
  File "/home/javier/PycharmProjects/thesis/thesis/binary_early_fusion.py", line 15, in <module>
    loaded_model = model_from_json(loaded_model_json)
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/models/model.py", line 586, in model_from_json
    return serialization_lib.deserialize_keras_object(
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/saving/serialization_lib.py", line 711, in deserialize_keras_object
    instance = cls.from_config(inner_config)
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/models/sequential.py", line 335, in from_config
    model.add(layer)
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/models/sequential.py", line 116, in add
    self._maybe_rebuild()
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/models/sequential.py", line 135, in _maybe_rebuild
    self.build(input_shape)
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/layers/layer.py", line 223, in build_wrapper
    original_build_method(*args, **kwargs)
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/models/sequential.py", line 176, in build
    x = layer(x)
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py", line 122, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/home/javier/PycharmProjects/thesis/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/flatten.py", line 72, in compute_output_spec
    output_shape = self.compute_output_shape(inputs.shape)
AttributeError: Exception encountered when calling Flatten.call().

'list' object has no attribute 'shape'

Arguments received by Flatten.call():
  • args=(['<KerasTensor shape=(None, 7, 7, 512), dtype=float32, sparse=False, name=keras_tensor_70>'],)
  • kwargs=<class 'inspect._empty'>

2 Answers


  1. Chosen as BEST ANSWER

    In order to fix this, I had to rewrite it and use other approaches:

    I switched to image_dataset_from_directory to get the images and their labels into a dataset. And finally, I changed how I build the model: instead of a Sequential model, I now define an Input and add the layers on top of it with the functional API.

    Unfortunately, I don't know why these changes work (I admit my ignorance on this topic). I hope this answer can help you.

    import os
    import numpy as np
    import pandas as pd
    import tensorflow as tf
    from keras import losses
    from keras.models import Model
    from keras.models import Sequential
    from keras.utils import image_dataset_from_directory
    from keras.layers import Dense, Dropout, Flatten, Rescaling, Input
    from keras.optimizers import Adam
    from keras.metrics import Precision, Recall
    from keras.applications.vgg19 import VGG19
    from keras.applications.inception_v3 import InceptionV3
    from keras.applications.resnet50 import ResNet50
    from keras.applications.efficientnet_v2 import EfficientNetV2S
    
    IMG_HEIGHT = 224
    IMG_WIDTH = 224
    MODELS_INPUT_SHAPE = (IMG_HEIGHT, IMG_WIDTH, 3)
    NUM_CLASSES = 2
    EPOCHS = 1
    BATCH_SIZE = 32
    
    train_dataframe = pd.read_csv('data/train-data.csv')
    train_images_location = 'data/train-images/'
    
    labels = []
    #image_dataset_from_directory uses os.walk(directory) to obtain the filenames. labels then have to be sorted according to that, otherwise they get scrambled.
    for root, directories, images in os.walk(train_images_location):
        for image in images:
            labels.append(int(train_dataframe.loc[train_dataframe['id'] == image, 'T1'].iloc[0]))
    
    labels = list(np.array(labels, dtype=np.float32))
    
    train_dataset = image_dataset_from_directory(
        train_images_location,
        labels=labels,
        label_mode="binary",
        class_names=None,
        color_mode='rgb',
        batch_size=BATCH_SIZE,
        image_size=(IMG_HEIGHT, IMG_WIDTH),
        shuffle=True,
    )
    
    models = {
        'vgg19_binary.keras': VGG19(weights='imagenet', include_top=False, input_shape=MODELS_INPUT_SHAPE)
        #'inceptionv3_binary.keras': InceptionV3(include_top=False, input_shape=MODELS_INPUT_SHAPE),
        #'resnet50_binary.keras': ResNet50(include_top=False, input_shape=MODELS_INPUT_SHAPE),
        #'efficientnetv2s_binary.keras': EfficientNetV2S(include_top=False, input_shape=MODELS_INPUT_SHAPE)
    }
    
    for model_name in models.keys():
    
        base_model = models[model_name]
    
        for layer in base_model.layers:
            layer.trainable = False
    
        img_input = Input(shape=(IMG_HEIGHT, IMG_WIDTH, 3))
    
        x = Rescaling(1. / 255)(img_input)
        x = base_model(x)
        x = Flatten()(x)
        x = Dense(256, activation='relu')(x)
        x = Dense(128, activation='relu')(x)
        x = Dense(64, activation='relu')(x)
        x = Dense(32, activation='relu')(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(8, activation='relu')(x)
    
        x = Dense(1, activation='sigmoid')(x)
    
        model = Model(img_input, x, name='vgg19_new_trained')
    
        model.compile(loss=losses.BinaryCrossentropy(),
                      optimizer=Adam(learning_rate=0.0001),
                      metrics=['accuracy', Precision(), Recall()])
    
        model.fit(train_dataset, epochs=EPOCHS, batch_size=BATCH_SIZE)
    
        model_location = 'models/'+model_name
        model.save(model_location)
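
    With the model saved in the native .keras format like this, loading it back afterwards works with plain load_model (a minimal sketch, assuming the training loop above has run and the same Keras version is used for loading):

    from keras.models import load_model

    # Load the model saved by the training loop above; only built-in layers
    # are used, so no custom_objects argument is needed.
    vgg19_model = load_model('models/vgg19_binary.keras')
    vgg19_model.summary()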
    

  2. Flatten() works on a single tensor, and it seems you're handing it a list. That is why it complains that a list object has no ".shape" attribute.

    In order to fix this, inspect the shapes flowing through your model.

    You already have this loop; just add the last two lines to it:

    for layer in base_model.layers:
        layer.trainable = False
        layer_output = layer.output
        print(layer.name, layer_output.shape)
    

    Look at the shapes and make sure each layer's output matches what the next layer expects as input.
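
    If the list really is the culprit, one way to make sure Flatten receives a single tensor is to build the classification head with the functional API directly on base_model.output, instead of stacking the whole base model inside a Sequential. A rough sketch under that assumption (not tested on your exact data):

    from keras.models import Model
    from keras.layers import Dense, Flatten
    from keras.applications.vgg19 import VGG19

    base_model = VGG19(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    for layer in base_model.layers:
        layer.trainable = False

    # base_model.output is a single KerasTensor, so Flatten gets a tensor rather than a list
    x = Flatten()(base_model.output)
    x = Dense(256, activation='relu')(x)
    output = Dense(1, activation='sigmoid')(x)
    model = Model(base_model.input, output)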
