I've written DeepLabV3+ with a MobileNetV3 backbone:
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # set before importing TensorFlow so it takes effect

import tensorflow as tf
from keras.models import Model
from keras.applications import MobileNetV3Large
from keras.layers import (
    Conv2D,
    BatchNormalization,
    Activation,
    MaxPool2D,
    Conv2DTranspose,
    Concatenate,
    Input,
)
from keras.layers import (
    AveragePooling2D,
    GlobalAveragePooling2D,
    UpSampling2D,
    Reshape,
    Dense,
    ReLU,
)
def SqueezeAndExcite(inputs, ratio=8):
    """Squeeze-and-Excitation block: channel-wise recalibration of the input features."""
    init = inputs
    filters = init.shape[-1]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(
        filters // ratio,
        activation="relu",
        kernel_initializer="he_normal",
        use_bias=False,
    )(se)
    se = Dense(
        filters, activation="sigmoid", kernel_initializer="he_normal", use_bias=False
    )(se)

    x = init * se
    return x
def ASPP(inputs):
    """Atrous Spatial Pyramid Pooling."""
    shape = inputs.shape

    # Image pooling branch
    y1 = AveragePooling2D(pool_size=(shape[1], shape[2]))(inputs)
    y1 = Conv2D(256, 1, padding="same", use_bias=False)(y1)
    y1 = BatchNormalization()(y1)
    y1 = Activation("relu")(y1)
    y1 = UpSampling2D((shape[1], shape[2]), interpolation="bilinear")(y1)

    # 1x1 convolution branch
    y2 = Conv2D(256, 1, padding="same", use_bias=False)(inputs)
    y2 = BatchNormalization()(y2)
    y2 = Activation("relu")(y2)

    # 3x3 convolution, dilation rate 6
    y3 = Conv2D(256, 3, padding="same", use_bias=False, dilation_rate=6)(inputs)
    y3 = BatchNormalization()(y3)
    y3 = Activation("relu")(y3)

    # 3x3 convolution, dilation rate 12
    y4 = Conv2D(256, 3, padding="same", use_bias=False, dilation_rate=12)(inputs)
    y4 = BatchNormalization()(y4)
    y4 = Activation("relu")(y4)

    # 3x3 convolution, dilation rate 18
    y5 = Conv2D(256, 3, padding="same", use_bias=False, dilation_rate=18)(inputs)
    y5 = BatchNormalization()(y5)
    y5 = Activation("relu")(y5)

    y = Concatenate()([y1, y2, y3, y4, y5])
    y = Conv2D(256, 1, padding="same", use_bias=False)(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    return y
def deeplabv3_plus(trainable=False):
    """DeepLabV3+ with a MobileNetV3-Large encoder."""
    shape = (224, 224, 3)
    inputs = Input(shape)

    # Encoder
    encoder = MobileNetV3Large(
        weights="imagenet",
        include_top=False,
        input_tensor=inputs,
        minimalistic=True,
    )
    image_features = encoder.get_layer("expanded_conv_11/project/BatchNorm").output
    x_b = encoder.get_layer("expanded_conv_2/project/BatchNorm").output

    for layer in encoder.layers:  # Freeze the backbone layers (trainable=False by default)
        layer.trainable = trainable

    # Decoder
    x_a = ASPP(image_features)
    x_a = UpSampling2D((4, 4), interpolation="bilinear")(x_a)

    x_b = Conv2D(filters=48, kernel_size=1, padding="same", use_bias=False)(x_b)
    x_b = BatchNormalization()(x_b)
    x_b = Activation("relu")(x_b)

    x = Concatenate()([x_a, x_b])
    x = SqueezeAndExcite(x)

    x = Conv2D(filters=256, kernel_size=3, padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=256, kernel_size=3, padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = SqueezeAndExcite(x)

    x = UpSampling2D((4, 4), interpolation="bilinear")(x)
    x = Conv2D(1, 1)(x)
    x = Activation("sigmoid")(x)

    model = Model(inputs, x)
    return model
if __name__ == "__main__":
    model = deeplabv3_plus()
    model.summary()
    model.save("model.h5")
When I saved the model and tried to convert it from Keras to TFJS with
!tensorflowjs_converter \
--input_format=keras \
--output_format=tfjs_graph_model \
--saved_model_tags=serve \
--quantize_float16=* \
model.h5 \
model_float16
I got this error:
WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op while saving (showing 5 of 46). These functions will not be directly callable after loading.
WARNING:tensorflow:Didn't find expected Conv2D or DepthwiseConv2dNative input to 'StatefulPartitionedCall/model_1/batch_normalization_11/FusedBatchNormV3'
WARNING:tensorflow:Didn't find expected Conv2D or DepthwiseConv2dNative input to 'StatefulPartitionedCall/model_1/batch_normalization_11/FusedBatchNormV3'
WARNING:tensorflow:Didn't find expected Conv2D or DepthwiseConv2dNative input to 'StatefulPartitionedCall/model_1/batch_normalization_12/FusedBatchNormV3'
WARNING:tensorflow:Didn't find expected Conv2D or DepthwiseConv2dNative input to 'StatefulPartitionedCall/model_1/batch_normalization_12/FusedBatchNormV3'
WARNING:tensorflow:Didn't find expected Conv2D or DepthwiseConv2dNative input to 'StatefulPartitionedCall/model_1/batch_normalization_13/FusedBatchNormV3'
WARNING:tensorflow:Didn't find expected Conv2D or DepthwiseConv2dNative input to 'StatefulPartitionedCall/model_1/batch_normalization_13/FusedBatchNormV3'
Traceback (most recent call last):
  File "/usr/local/bin/tensorflowjs_converter", line 8, in <module>
    sys.exit(pip_main())
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/converter.py", line 827, in pip_main
    main([' '.join(sys.argv[1:])])
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/converter.py", line 831, in main
    convert(argv[0].split(' '))
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/converter.py", line 817, in convert
    _dispatch_converter(input_format, output_format, args, quantization_dtype_map,
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/converter.py", line 508, in _dispatch_converter
    dispatch_keras_h5_to_tfjs_graph_model_conversion(
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/converter.py", line 148, in dispatch_keras_h5_to_tfjs_graph_model_conversion
    tf_saved_model_conversion_v2.convert_tf_saved_model(
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/tf_saved_model_conversion_v2.py", line 976, in convert_tf_saved_model
    _convert_tf_saved_model(output_dir, saved_model_dir=saved_model_dir,
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/tf_saved_model_conversion_v2.py", line 851, in _convert_tf_saved_model
    optimized_graph = optimize_graph(frozen_graph, signature,
  File "/usr/local/lib/python3.10/dist-packages/tensorflowjs/converters/tf_saved_model_conversion_v2.py", line 204, in optimize_graph
    raise ValueError('Unsupported Ops in the model after optimization\n' +
ValueError: Unsupported Ops in the model after optimization
_FusedBatchNormEx
The ValueError seems to be caused by the BatchNormalization layers. I think I should work around this limitation by replacing the repeated pattern
y = Conv2D(filters, kernels, padding="same", use_bias=False, dilation_rate=18)(inputs)
y = BatchNormalization()(y)
y = Activation("relu")(y)
with a custom Keras pattern that fuses Conv2D + BatchNorm + Activation. Is my assumption correct, and if so, how can I implement this fusion in Keras?
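For reference, this is roughly what I mean by "fusion"; it is only an untested sketch (the helper name fold_bn_into_conv is my own, not a Keras or TFJS API): after training, fold each BatchNormalization layer's statistics into the kernel and bias of the Conv2D that precedes it, then rebuild the model without the BatchNormalization layers.

import numpy as np

def fold_bn_into_conv(conv_layer, bn_layer):
    """Fold trained BatchNorm statistics into the preceding Conv2D.

    Assumes the Conv2D was built with use_bias=False and the
    BatchNormalization layer uses the default center/scale settings.
    """
    kernel = conv_layer.get_weights()[0]             # (kh, kw, in_ch, out_ch)
    gamma, beta, mean, var = bn_layer.get_weights()  # order for center=True, scale=True
    eps = bn_layer.epsilon

    scale = gamma / np.sqrt(var + eps)               # per-output-channel scale
    fused_kernel = kernel * scale                    # broadcasts over the last kernel axis
    fused_bias = beta - mean * scale
    return fused_kernel, fused_bias

I would then rebuild each block as a single Conv2D(..., use_bias=True) followed only by the activation, call set_weights([fused_kernel, fused_bias]) on the new convolution, and export that model to TFJS instead.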