keras 如何显示单个类的图像中的对象数?

ozxc1zmp  于 2022-11-13  发布在  其他
关注(0)|答案(2)|浏览(139)

我是一个编程新手,一直在学习谷歌colab中的对象检测算法教程。下面是谷歌对象检测API https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb中使用的代码,它使用单镜头检测器来输出图像。我添加了一个全局变量“count”和for循环来计数得分大于0.5的对象的数量。这将适用于单类检测的情况。检查了几张图片的结果,它正确地打印了计数值,如Number of dogs中所示。现在,我想在图片上显示这个数字。例如,在图片中添加了一行“人数:{count value}”请向我展示如何编辑下面的代码以实现结果。

def show_inference(model, image_path):
  """Run detection on a single image, display the annotated result, and
  print the number of detections with score > 0.5.

  Args:
    model: detection model accepted by run_inference_for_single_image.
    image_path: path of the image file to process.
  """
  global count
  count = 0
  # The array based representation of the image will be used later in order
  # to prepare the result image with boxes and labels on it.
  image_np = np.array(Image.open(image_path))
  # Actual detection.
  output_dict = run_inference_for_single_image(model, image_np)
  # Visualization of the results of a detection.
  vis_util.visualize_boxes_and_labels_on_image_array(
      image_np,
      output_dict['detection_boxes'],
      output_dict['detection_classes'],
      output_dict['detection_scores'],
      category_index,
      instance_masks=output_dict.get('detection_masks_reframed', None),
      use_normalized_coordinates=True,
      line_thickness=8)
  display(Image.fromarray(image_np))
  # Count detections above the 0.5 confidence threshold.
  for score in output_dict['detection_scores']:
    if score > 0.5:
      count = count + 1
  print(count)
# Run inference (and print the detection count) for every test image.
for image_path in TEST_IMAGE_PATHS:
 show_inference(detection_model, image_path)
57hvy0tb

57hvy0tb1#

下面的代码将在现有的 Google Colab 对象检测 API 示例的输出图像上添加一行文本,显示检测到的人数,例如 "Number of people: 3"。

def show_inference(model, image_path):
 """Run detection on one image, draw the detection boxes, stamp the
 detection count onto the image, and show it with cv2_imshow.

 Args:
   model: detection model accepted by run_inference_for_single_image.
   image_path: path of the image file to process.
 """
 global count
 count = 0
 # The array based representation of the image will be used later in order
 # to prepare the result image with boxes and labels on it.
 image_np = np.array(Image.open(image_path))

 # Actual detection.
 output_dict = run_inference_for_single_image(model, image_np)

 # Visualization of the results of a detection.
 vis_util.visualize_boxes_and_labels_on_image_array(
  image_np,
  output_dict['detection_boxes'],
  output_dict['detection_classes'],
  output_dict['detection_scores'],
  category_index,
  instance_masks=output_dict.get('detection_masks_reframed', None),
  use_normalized_coordinates=True,
  line_thickness=8)

 # Save the annotated image so it can be reloaded with OpenCV below.
 img = Image.fromarray(image_np)
 img.save('/content/my_pig.png')

 # Count detections above the 0.5 confidence threshold.
 for score in output_dict['detection_scores']:
  if score > 0.5:
   count = count + 1

 # Overlay the count as a text line, e.g. "Number of people: 3".
 im = cv2.imread("/content/my_pig.png")
 im = cv2.putText(im, 'Number of people: ' + str(count), (50, 50),
                  cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
 cv2_imshow(im)
lpwwtiir

lpwwtiir2#

对于你的问题——绘制检测框并计数——可以参考下面的示例轻松实现;我在对象计数 AI 中就是用这种方式统计数量的。
示例使用 TensorFlow 的基本边界框绘制和颜色标注;同样的思路也适用于 YOLO 或其他任何神经网络。绘制函数按位图方式工作,需要把结果叠加到另一张图像上,你可以直接在 Colab 中运行它。

def search_screen( image_cropped ):
    """Scan an image over an n_boxes x n_boxes grid, detect objects per cell
    with flip/rotation symmetry heuristics, draw a class-colored bounding box
    for each hit, optionally run one training step, and return the annotated
    image together with per-class counts.

    NOTE(review): depends on many module-level names not visible in this
    chunk (CROP_SIZE, IMAGE_SIZE, n_objects, model, custom_callback,
    checkpoint_path, b_save_image_object, b_training_object_detection,
    identity_target_objects, i_count) -- confirm they are defined by the
    surrounding script before reuse.

    Returns:
        Tuple of (annotated image as a PIL Image, object_count list indexed
        by class id, formatted argmax of the last prediction_scores,
        list_label, list_position_data).
    """
    image_cropped = tf.keras.preprocessing.image.img_to_array( image_cropped )
    image_cropped = tf.cast( image_cropped, dtype=tf.float32 )
    # Image geometry; the grid below splits the frame into n_boxes x n_boxes cells.
    width = image_cropped.shape[1]
    height = image_cropped.shape[0]
    channels = image_cropped.shape[2]
    box_sizes = 10
    n_boxes = 10
    
    object_position = [ 0, 0, 0 ]
    object_properties = [ 0, 0, 0, 0, 0 ]
    # Per-class tally, indexed by the predicted target_object (0-9).
    object_count = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
    
    # Sample buffers shared with the rest of the script.
    global list_input_data
    global list_position_data
    global list_label
    global scores
    
    # NOTE(review): the buffers are re-zeroed to a single entry on every
    # call, so the [-100:] trims immediately below are no-ops at this point
    # and no samples persist between calls -- confirm this is intended.
    list_input_data = tf.zeros([ 1, 21, 21, 3 ]).numpy()
    list_position_data = tf.zeros([ 1, 3 ]).numpy()
    list_label = tf.zeros([ 1, 1 ]).numpy()
    
    list_input_data = list_input_data[-100:,-2100:,-2100:,-300:]
    list_input_data = tf.cast( list_input_data, dtype=tf.float32 ).numpy()
    list_position_data = list_position_data[-100:,-300:]
    list_position_data = tf.cast( list_position_data, dtype=tf.float32 ).numpy()
    list_label = list_label[-100:,-100:]
    list_label = tf.cast( list_label, dtype=tf.float32 ).numpy()
    
    # Running counter used to name the saved per-cell debug images.
    global i_count
    
    # Scan every (i, j) grid cell.
    for i in range(n_boxes):
        for j in range(n_boxes):
            # Crop cell (i, j). NOTE(review): assumes CROP_SIZE matches the
            # dimensions of image_cropped -- confirm against the caller.
            cropped_image_cell_search = tf.image.crop_to_bounding_box(image_cropped, int( CROP_SIZE[0] / 10 ) * i, 
                    int( CROP_SIZE[1] / 10 ) * j, int( CROP_SIZE[0] / 10 ), int( CROP_SIZE[1] / 10 ) )
            
            
            
            left_to_right = tf.image.flip_left_right(cropped_image_cell_search)
            up_to_down = tf.image.flip_up_down(cropped_image_cell_search)
            
            # Number of pixels that change under a horizontal flip; zero
            # means the cell is perfectly symmetric and is skipped as empty.
            left_to_right = tf.math.count_nonzero( cropped_image_cell_search - left_to_right, dtype=tf.dtypes.int64 ).numpy()
            
            if left_to_right == 0 :
                pass

            else :
            
                up_to_down = tf.math.count_nonzero( cropped_image_cell_search - up_to_down, dtype=tf.dtypes.int64 ).numpy()
                
                # Diagonal-symmetry measure: compare the cell with its
                # 90-degree rotation after zero-padding both to a common
                # 21x21 canvas (hard-coded 21x16 cell shape).
                a_rot = tf.image.rot90( cropped_image_cell_search )
                a_rot = tf.constant( a_rot, shape=( 16, 21, 3) )
                picture_temp = tf.constant( cropped_image_cell_search, shape=( 21, 16, 3 ) )
                a_rot = tf.concat([ tf.zeros([ 5, 21, 3]), a_rot], axis=0 )
                b_rot = tf.concat([ picture_temp, tf.zeros([ 21, 5, 3])], axis=1 )
                diag = tf.math.count_nonzero( tf.math.subtract( a_rot, b_rot, name='subtract' ) ).numpy()
                
                # Hand-tuned symmetry signature of the fixed "target" class
                # (label 9): queue a training sample and draw a black box.
                if ( diag <= 565 and diag >= 500 and up_to_down <= 96 and left_to_right >= 70 and left_to_right <= 100 ):
                
                    object_position[0] = i * height
                    object_position[1] = j * width
                    object_properties = [ 0, 0, 0, 0, 0 ]
                    object_properties[0] = left_to_right
                    object_properties[1] = up_to_down
                    object_properties[2] = diag
                    object_properties[3] = 1
                    object_properties[4] = 1
                    
                    target_object = 9
                    prediction_scores = tf.ones( [ n_objects ] ) * 95.00
                    
                    # Tile the 5 property scalars into a 21x5x3 panel and
                    # append it to the squeezed cell, presumably forming a
                    # 21x21x3 sample -- TODO confirm the cell is 21x16x3.
                    object_properties = tf.constant( object_properties, shape=( 5, 1, 1 ), dtype=tf.float32 )
                    object_properties = tf.keras.layers.UpSampling1D( size=63 )( object_properties )
                    object_properties = tf.constant( object_properties, shape=( 21, 5, 3 ) )
                    input_data = tf.squeeze( cropped_image_cell_search )
                    input_data = tf.concat( [input_data, object_properties], axis=1 )
                    label = tf.constant( 9, dtype=tf.int64 ).numpy()
                    
                    list_input_data = tf.experimental.numpy.append( list_input_data, tf.constant( input_data, shape=(1, 21, 21, 3)), axis=0 )
                    list_position_data = tf.experimental.numpy.append( list_position_data, tf.constant( object_position, shape=(1, 3)), axis=0 )
                    list_label = tf.experimental.numpy.append( list_label, tf.constant( label, shape=(1, 1)), axis=0 )
                
                    # Cell bounds in normalized [0, 1] coordinates for
                    # tf.image.draw_bounding_boxes.
                    Y_scope = float(( int( height / n_boxes ) * i ) / height )
                    Y_alise = float(( int( height / n_boxes ) * ( i + 1 ) ) / height )
                    X_scope = float(( int( width / n_boxes ) * j ) / width )
                    X_alise = float(( int( width / n_boxes ) * ( j + 1 ) ) / width )
                    boxes_custom_input = tf.constant([ Y_scope, X_scope, Y_alise, X_alise ], shape=(1, 1, 4))
                    colors = tf.constant([[0.0, 0.0, 0.0]])
                    # Scale to [0, 1] for drawing, then back to [0, 255].
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) / 256.0 )
                    image_cropped = tf.image.draw_bounding_boxes(tf.constant(image_cropped, shape=(1, IMAGE_SIZE[0], IMAGE_SIZE[1], IMAGE_SIZE[2]), dtype=tf.float32), boxes_custom_input, colors)
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) *  255.0 )
                
                
                # Larger, asymmetric blob: classify it with the external
                # identity_target_objects helper, tally the class, and draw
                # a class-colored box.
                elif ( left_to_right > 130 and up_to_down > 130 and diag > 600  ) :
                    i_count = i_count + 1
                    object_position[0] = i * height
                    object_position[1] = j * width
                    object_properties = [ 0, 0, 0, 0, 0 ]
                    object_properties[0] = left_to_right
                    object_properties[1] = up_to_down
                    object_properties[2] = diag
                    object_properties[3] = 1
                    object_properties[4] = 1
                    
                    # Optionally dump the raw cell crop for offline inspection.
                    if b_save_image_object :
                        file = "F:\\temp\\image_catagorize\\20220620\\{filename_1:n}_{filename_2:n}".format(filename_1 = i_count, filename_2 = diag) + ".png"
                        tf.keras.utils.save_img(
                                file, cropped_image_cell_search, data_format=None, file_format=None, scale=True )
                
                    target_object, prediction_scores, input_data, label = identity_target_objects( cropped_image_cell_search, object_position, object_properties, n_boxes * i + j )
                    list_input_data = tf.experimental.numpy.append( list_input_data, tf.constant( input_data, shape=(1, 21, 21, 3)), axis=0 )
                    list_position_data = tf.experimental.numpy.append( list_position_data, tf.constant( object_position, shape=(1, 3)), axis=0 )
                    list_label = tf.experimental.numpy.append( list_label, tf.constant( label, shape=(1, 1)), axis=0 )
                    
                    temp = int(object_count[target_object])
                    object_count[target_object] = temp + 1
                    
                    # Cell bounds in normalized [0, 1] coordinates.
                    Y_scope = float(( int( height / n_boxes ) * i ) / height )
                    Y_alise = float(( int( height / n_boxes ) * ( i + 1 ) ) / height )
                    X_scope = float(( int( width / n_boxes ) * j ) / width )
                    X_alise = float(( int( width / n_boxes ) * ( j + 1 ) ) / width )
                    boxes_custom_input = tf.constant([ Y_scope, X_scope, Y_alise, X_alise ], shape=(1, 1, 4))
                    
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) / 256.0 )
                    
                    # Box color encodes the predicted class index.
                    colors = tf.constant([[0.0, 0.0, 1.0]])
                    
                    if target_object == 0:
                        colors = tf.constant([[0.0, 0.0, 1.0]])
                    elif target_object == 1:
                        colors = tf.constant([[0.0, 0.5, 0.5]])
                    elif target_object == 2:
                        colors = tf.constant([[0.5, 0.5, 0.5]])
                    elif target_object == 3:
                        colors = tf.constant([[1.0, 0.0, 0.0]])
                    elif target_object == 4:
                        colors = tf.constant([[0.5, 0.5, 0.0]])
                    elif target_object == 5:
                        colors = tf.constant([[0.0, 1.0, 0.0]])
                    elif target_object == 6:
                        colors = tf.constant([[0.5, 1.0, 0.5]])
                    elif target_object == 7:
                        colors = tf.constant([[1.0, 0.5, 0.5]])
                    elif target_object == 8:
                        colors = tf.constant([[0.5, 0.5, 1.0]])
                    elif target_object == 9:
                        colors = tf.constant([[1.0, 1.0, 1.0]])
                    else:
                        colors = tf.constant([[0.0, 0.0, 0.0]])
                    
                    image_cropped = tf.image.draw_bounding_boxes(tf.constant(image_cropped, shape=(1, IMAGE_SIZE[0], IMAGE_SIZE[1], IMAGE_SIZE[2]), dtype=tf.float32), boxes_custom_input, colors)
                    image_cropped = tf.keras.preprocessing.image.img_to_array( tf.squeeze(image_cropped) *  255.0 )
    
    # Optionally fine-tune the external model on the samples gathered above.
    if b_training_object_detection :
    
        list_input_data = tf.cast( list_input_data, dtype=tf.float32 )
        list_label = tf.cast( list_label, dtype=tf.float32 )
        
        dataset_object_detection = tf.data.Dataset.from_tensor_slices((tf.constant(list_input_data, shape=(1, len(list_input_data), 21, 21, 3), dtype=tf.float32), 
                tf.constant(list_label, shape=(1, len(list_label), 1), dtype=tf.float32)))
                
        history = model.fit( dataset_object_detection, batch_size=500, epochs=1, callbacks=[custom_callback] )
        model.save_weights(checkpoint_path)
    
    
    ###################################################################################
    # image_cropped = image_target_number( image_cropped, object_position ) 
    ###################################################################################

    # Convert the annotated tensor back to a PIL image for the caller.
    image_cropped = tf.constant( image_cropped, shape=IMAGE_SIZE )
    image_cropped = tf.keras.preprocessing.image.array_to_img( image_cropped )
    
    # Trim the shared buffers to their most recent entries.
    list_input_data = list_input_data[-100:,-2100:,-2100:,-300:]
    list_position_data = list_position_data[-100:,-300:]
    list_label = list_label[-100:,-100:]
    
    # NOTE(review): prediction_scores is only bound inside the detection
    # branches, so a frame with no hits would raise NameError here; also
    # the formatted value is the *argmax index*, not the score itself --
    # confirm both are intended.
    return image_cropped, object_count, "{:.2f}".format( tf.math.argmax( prediction_scores ).numpy() ), list_label, list_position_data

输出量:

相关问题