How to display the number of objects in an image for a single class?


I am new to programming and have been following the Google Colab tutorial on object detection algorithms. Below is the code from the Google Object Detection API (https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb), which uses a single-shot detector to output images. I added a global variable "count" and a for loop to count the number of objects with a score above 0.5. This works for single-class detection; I checked the result with several images, and it printed the count value correctly, as in the example image "Number of dogs". Now I want to display this number on the images themselves, so that each image gets an added line of text such as "Number of people: {count value}". Please show me how to edit the code below to achieve this.

def show_inference(model, image_path):
    global count
    count = 0
    # The array-based representation of the image will be used later
    # to prepare the result image with boxes and labels on it.
    image_np = np.array(Image.open(image_path))
    # Actual detection.
    output_dict = run_inference_for_single_image(model, image_np)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)
    display(Image.fromarray(image_np))
    # Count detections whose confidence score exceeds 0.5.
    for o in output_dict['detection_scores']:
        if o > 0.5:
            count = count + 1
    print(count)

for image_path in TEST_IMAGE_PATHS:
    show_inference(detection_model, image_path)
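As an aside, since output_dict['detection_scores'] is a NumPy array, the counting loop above can also be written as a single vectorized expression. A minimal sketch, assuming output_dict is the dictionary returned by run_inference_for_single_image:

import numpy as np

# Equivalent one-liner: count the scores above the 0.5 threshold.
count = int(np.sum(output_dict['detection_scores'] > 0.5))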

There are 2 answers below.

Answer 1:

For your question: to create boxes and count objects, you can do this easily with the example below, which I use for AI object counting.

Sample: This uses basic bounding boxes with colors drawn via TensorFlow; AI applications apply the same idea, and you can use YOLO or any other network. It works like a bitmap-drawing function: you draw one image on top of another and combine them.
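Before the full routine, here is a minimal sketch of the core drawing step, tf.image.draw_bounding_boxes, which paints normalized boxes onto a batch of images. The file name and box coordinates are placeholders, not values from this answer:

import tensorflow as tf

# Minimal sketch: draw one normalized bounding box on an image.
# "sample.png" and the coordinates below are placeholder values.
image = tf.io.decode_image(tf.io.read_file("sample.png"), channels=3)
image = tf.cast(image, tf.float32) / 255.0            # scale pixels to [0, 1]
batch = tf.expand_dims(image, 0)                      # the op expects a 4-D batch
boxes = tf.constant([[[0.1, 0.2, 0.5, 0.6]]])         # [y_min, x_min, y_max, x_max], normalized
colors = tf.constant([[1.0, 0.0, 0.0]])               # red box
drawn = tf.image.draw_bounding_boxes(batch, boxes, colors)
result = tf.keras.preprocessing.image.array_to_img(tf.squeeze(drawn, 0))

The full grid-scanning routine from the answer follows.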

# NOTE: this routine relies on globals defined elsewhere in the project:
# CROP_SIZE, IMAGE_SIZE, n_objects, i_count, b_save_image_object,
# b_training_object_detection, model, custom_callback, checkpoint_path,
# and the helper identity_target_objects().
def search_screen( image_cropped ):
    image_cropped = tf.keras.preprocessing.image.img_to_array( image_cropped )
    image_cropped = tf.cast( image_cropped, dtype=tf.float32 )
    width = image_cropped.shape[1]
    height = image_cropped.shape[0]
    channels = image_cropped.shape[2]
    box_sizes = 10
    n_boxes = 10
    
    object_position = [ 0, 0, 0 ]
    object_properties = [ 0, 0, 0, 0, 0 ]
    object_count = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
    
    global list_input_data
    global list_position_data
    global list_label
    global scores
    
    list_input_data = tf.zeros([ 1, 21, 21, 3 ]).numpy()
    list_position_data = tf.zeros([ 1, 3 ]).numpy()
    list_label = tf.zeros([ 1, 1 ]).numpy()
    
    list_input_data = list_input_data[-100:,-2100:,-2100:,-300:]
    list_input_data = tf.cast( list_input_data, dtype=tf.float32 ).numpy()
    list_position_data = list_position_data[-100:,-300:]
    list_position_data = tf.cast( list_position_data, dtype=tf.float32 ).numpy()
    list_label = list_label[-100:,-100:]
    list_label = tf.cast( list_label, dtype=tf.float32 ).numpy()
    
    global i_count
    
    # Scan the image as an n_boxes x n_boxes grid of cells.
    for i in range(n_boxes):
        for j in range(n_boxes):
            cropped_image_cell_search = tf.image.crop_to_bounding_box(image_cropped, int( CROP_SIZE[0] / 10 ) * i, 
                    int( CROP_SIZE[1] / 10 ) * j, int( CROP_SIZE[0] / 10 ), int( CROP_SIZE[1] / 10 ) )
            
            # Compare the cell against its mirrored copies; the count of differing
            # pixels is a cheap asymmetry measure used to detect objects.
            left_to_right = tf.image.flip_left_right(cropped_image_cell_search)
            up_to_down = tf.image.flip_up_down(cropped_image_cell_search)
            left_to_right = tf.math.count_nonzero( cropped_image_cell_search - left_to_right, dtype=tf.dtypes.int64 ).numpy()
            
            if left_to_right == 0 :
                # Horizontally symmetric cell (e.g. uniform background): skip it.
                pass

            else :
            
                up_to_down = tf.math.count_nonzero( cropped_image_cell_search - up_to_down, dtype=tf.dtypes.int64 ).numpy()
                
                a_rot = tf.image.rot90( cropped_image_cell_search )
                a_rot = tf.constant( a_rot, shape=( 16, 21, 3) )
                picture_temp = tf.constant( cropped_image_cell_search, shape=( 21, 16, 3 ) )
                a_rot = tf.concat([ tf.zeros([ 5, 21, 3]), a_rot], axis=0 )
                b_rot = tf.concat([ picture_temp, tf.zeros([ 21, 5, 3])], axis=1 )
                diag = tf.math.count_nonzero( tf.math.subtract( a_rot, b_rot, name='subtract' ) ).numpy()
                
                # Heuristic thresholds characterizing one specific object shape.
                if ( 500 <= diag <= 565 and up_to_down <= 96 and 70 <= left_to_right <= 100 ):
                
                    object_position[0] = i * height
                    object_position[1] = j * width
                    object_properties = [ 0, 0, 0, 0, 0 ]
                    object_properties[0] = left_to_right
                    object_properties[1] = up_to_down
                    object_properties[2] = diag
                    object_properties[3] = 1
                    object_properties[4] = 1
                    
                    target_object = 9
                    prediction_scores = tf.ones( [ n_objects ] ) * 95.00
                    
                    object_properties = tf.constant( object_properties, shape=( 5, 1, 1 ), dtype=tf.float32 )
                    object_properties = tf.keras.layers.UpSampling1D( size=63 )( object_properties )
                    object_properties = tf.constant( object_properties, shape=( 21, 5, 3 ) )
                    input_data = tf.squeeze( cropped_image_cell_search )
                    input_data = tf.concat( [input_data, object_properties], axis=1 )
                    label = tf.constant( 9, dtype=tf.int64 ).numpy()
                    
                    list_input_data = tf.experimental.numpy.append( list_input_data, tf.constant( input_data, shape=(1, 21, 21, 3)), axis=0 )
                    list_position_data = tf.experimental.numpy.append( list_position_data, tf.constant( object_position, shape=(1, 3)), axis=0 )
                    list_label = tf.experimental.numpy.append( list_label, tf.constant( label, shape=(1, 1)), axis=0 )
                
                    Y_scope = float(( int( height / n_boxes ) * i ) / height )
                    Y_alise = float(( int( height / n_boxes ) * ( i + 1 ) ) / height )
                    X_scope = float(( int( width / n_boxes ) * j ) / width )
                    X_alise = float(( int( width / n_boxes ) * ( j + 1 ) ) / width )
                    boxes_custom_input = tf.constant([ Y_scope, X_scope, Y_alise, X_alise ], shape=(1, 1, 4))
                    colors = tf.constant([[0.0, 0.0, 0.0]])
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) / 256.0 )
                    image_cropped = tf.image.draw_bounding_boxes(tf.constant(image_cropped, shape=(1, IMAGE_SIZE[0], IMAGE_SIZE[1], IMAGE_SIZE[2]), dtype=tf.float32), boxes_custom_input, colors)
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) *  255.0 )
                
                
                # Strongly asymmetric cell: treat it as a detected object.
                elif ( left_to_right > 130 and up_to_down > 130 and diag > 600 ) :
                    i_count = i_count + 1
                    object_position[0] = i * height
                    object_position[1] = j * width
                    object_properties = [ 0, 0, 0, 0, 0 ]
                    object_properties[0] = left_to_right
                    object_properties[1] = up_to_down
                    object_properties[2] = diag
                    object_properties[3] = 1
                    object_properties[4] = 1
                    
                    if b_save_image_object :
                        file = "F:\\temp\\image_catagorize\\20220620\\{filename_1:n}_{filename_2:n}".format(filename_1 = i_count, filename_2 = diag) + ".png"
                        tf.keras.utils.save_img(
                                file, cropped_image_cell_search, data_format=None, file_format=None, scale=True )
                
                    target_object, prediction_scores, input_data, label = identity_target_objects( cropped_image_cell_search, object_position, object_properties, n_boxes * i + j )
                    list_input_data = tf.experimental.numpy.append( list_input_data, tf.constant( input_data, shape=(1, 21, 21, 3)), axis=0 )
                    list_position_data = tf.experimental.numpy.append( list_position_data, tf.constant( object_position, shape=(1, 3)), axis=0 )
                    list_label = tf.experimental.numpy.append( list_label, tf.constant( label, shape=(1, 1)), axis=0 )
                    
                    temp = int(object_count[target_object])
                    object_count[target_object] = temp + 1
                    
                    Y_scope = float(( int( height / n_boxes ) * i ) / height )
                    Y_alise = float(( int( height / n_boxes ) * ( i + 1 ) ) / height )
                    X_scope = float(( int( width / n_boxes ) * j ) / width )
                    X_alise = float(( int( width / n_boxes ) * ( j + 1 ) ) / width )
                    boxes_custom_input = tf.constant([ Y_scope, X_scope, Y_alise, X_alise ], shape=(1, 1, 4))
                    
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) / 256.0 )
                    
                    # Box color per predicted class; unknown classes fall back to black.
                    palette = [
                        [0.0, 0.0, 1.0], [0.0, 0.5, 0.5], [0.5, 0.5, 0.5],
                        [1.0, 0.0, 0.0], [0.5, 0.5, 0.0], [0.0, 1.0, 0.0],
                        [0.5, 1.0, 0.5], [1.0, 0.5, 0.5], [0.5, 0.5, 1.0],
                        [1.0, 1.0, 1.0],
                    ]
                    if 0 <= target_object < len(palette):
                        colors = tf.constant([ palette[target_object] ])
                    else:
                        colors = tf.constant([[0.0, 0.0, 0.0]])
                    
                    image_cropped = tf.image.draw_bounding_boxes(tf.constant(image_cropped, shape=(1, IMAGE_SIZE[0], IMAGE_SIZE[1], IMAGE_SIZE[2]), dtype=tf.float32), boxes_custom_input, colors)
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) *  255.0 )
    
    # Optionally fine-tune the model on the crops gathered during this pass.
    if b_training_object_detection :
    
        list_input_data = tf.cast( list_input_data, dtype=tf.float32 )
        list_label = tf.cast( list_label, dtype=tf.float32 )
        
        dataset_object_detection = tf.data.Dataset.from_tensor_slices((tf.constant(list_input_data, shape=(1, len(list_input_data), 21, 21, 3), dtype=tf.float32), 
                tf.constant(list_label, shape=(1, len(list_label), 1), dtype=tf.float32)))
                
        history = model.fit( dataset_object_detection, batch_size=500, epochs=1, callbacks=[custom_callback] )
        model.save_weights(checkpoint_path)
    
    
    ###################################################################################
    # image_cropped = image_target_number( image_cropped, object_position ) 
    ###################################################################################

    image_cropped = tf.constant( image_cropped, shape=IMAGE_SIZE )
    image_cropped = tf.keras.preprocessing.image.array_to_img( image_cropped )
    
    list_input_data = list_input_data[-100:,-2100:,-2100:,-300:]
    list_position_data = list_position_data[-100:,-300:]
    list_label = list_label[-100:,-100:]
    
    return image_cropped, object_count, "{:.2f}".format( tf.math.argmax( prediction_scores ).numpy() ), list_label, list_position_data
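For orientation, a hypothetical call site for search_screen; the frame file name is an assumption, and the globals listed above must already be defined:

# Hypothetical usage: scan one frame and report the per-class counts.
frame = Image.open("frame.png")   # placeholder file; size must match IMAGE_SIZE
drawn, object_count, score, labels, positions = search_screen(frame)
print("objects per class:", object_count)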
Answer 2:

The code below adds a line of text to the existing Google Colab API example and displays the number of people, as in the example image "Number of people".

import cv2
from google.colab.patches import cv2_imshow

def show_inference(model, image_path):
    global count
    count = 0
    # The array-based representation of the image will be used later
    # to prepare the result image with boxes and labels on it.
    image_np = np.array(Image.open(image_path))
    # Actual detection.
    output_dict = run_inference_for_single_image(model, image_np)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)

    # Save the annotated image so OpenCV can reload it for the text overlay.
    img = Image.fromarray(image_np)
    img.save('/content/my_pig.png')

    # Count detections whose confidence score exceeds 0.5.
    for o in output_dict['detection_scores']:
        if o > 0.5:
            count = count + 1

    # Reload the saved image and draw the count as a text overlay.
    im = cv2.imread("/content/my_pig.png")
    im = cv2.putText(im, 'Number of people: ' + str(count), (50, 50),
                     cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    cv2_imshow(im)
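If you prefer not to write the image to disk and read it back with OpenCV, the same overlay can be drawn in memory with PIL. A sketch using the same variables as above; the text position and default font are arbitrary choices:

from PIL import Image, ImageDraw

# Sketch: draw the count directly on the in-memory detection result.
img = Image.fromarray(image_np)
draw = ImageDraw.Draw(img)
draw.text((50, 50), 'Number of people: ' + str(count), fill=(255, 0, 0))
display(img)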