Python ValueError: the first argument to Layer.call must always be passed; I want to write this model by subclassing

von4xj4u · posted 2022-10-30 in Python

I have several different blocks, and because I have to use shared parameters between them, I need to implement this model as a subclass.
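(For reference, a minimal sketch of the weight sharing that subclassing gives you, assuming tf.keras; the SharedEncoder name and layer size below are only illustrative, not part of the model in this question. Calling the same layer instance on two inputs reuses a single set of weights.)

    import tensorflow as tf

    class SharedEncoder(tf.keras.Model):
        # Hypothetical example: one Dense layer instance applied to two inputs,
        # so both branches share (and update) the same kernel and bias.
        def __init__(self):
            super().__init__()
            self.dense = tf.keras.layers.Dense(8)

        def call(self, a, b):
            return self.dense(a), self.dense(b)

    enc = SharedEncoder()
    out_a, out_b = enc(tf.random.normal((1, 4)), tf.random.normal((1, 4)))
    print(len(enc.trainable_variables))  # 2: one shared kernel plus one shared bias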

Convolutional and identity blocks

    import tensorflow as tf

    def convolutional_block(x, filter):
        # copy tensor to variable called x_skip
        x_skip = x
        # Layer 1
        x = tf.keras.layers.Conv2D(filter, (1, 1), strides=(2, 2))(x)
        x = tf.keras.layers.BatchNormalization(axis=3)(x)
        x = tf.keras.layers.Activation('relu')(x)
        # Layer 2
        x = tf.keras.layers.Conv2D(filter, (3, 3), padding='same')(x)
        x = tf.keras.layers.BatchNormalization(axis=3)(x)
        x = tf.keras.layers.Activation('relu')(x)
        # Layer 3
        x = tf.keras.layers.Conv2D(filter * 4, (1, 1))(x)
        x = tf.keras.layers.BatchNormalization(axis=3)(x)
        # Processing Residue with conv(1,1)
        x_skip = tf.keras.layers.Conv2D(filter * 4, (1, 1), strides=(2, 2))(x_skip)
        # Add Residue
        x = tf.keras.layers.Add()([x, x_skip])
        x = tf.keras.layers.Activation('relu')(x)
        return x

    def identity_block(x, filter, name):
        # copy tensor to variable called x_skip
        x_skip = x
        # Layer 1
        x = tf.keras.layers.Conv2D(filter, (1, 1))(x)
        x = tf.keras.layers.BatchNormalization(axis=3)(x)
        x = tf.keras.layers.Activation('relu')(x)
        # Layer 2
        x = tf.keras.layers.Conv2D(filter, (3, 3), padding='same')(x)
        x = tf.keras.layers.BatchNormalization(axis=3)(x)
        x = tf.keras.layers.Activation('relu')(x)
        # Layer 3
        x = tf.keras.layers.Conv2D(filter * 4, (1, 1))(x)
        x = tf.keras.layers.BatchNormalization(axis=3)(x)
        # Add Residue
        x = tf.keras.layers.Add()([x, x_skip])
        x = tf.keras.layers.Activation('relu', name=f'identity_{name}')(x)
        return x

Mutex attention block

    class MutexAttentionBlock(tf.keras.layers.Layer):
        def __init__(self):
            super(MutexAttentionBlock, self).__init__()
            self.softmax = tf.keras.layers.Activation('softmax')
            self.multiply = tf.keras.layers.Multiply()

        def call(self, x, y):
            distance = tf.math.square(tf.math.subtract(x, y))
            x = tf.keras.layers.Reshape((-1, distance.shape[-1]))(distance)
            x = self.softmax(x)
            x = tf.keras.layers.Reshape((distance[1], distance[2], distance[3]))(x)
            f_am = self.multiply(x, y)
            return f_am

Fuse attention block

    class FuseAttentionBlock(tf.keras.layers.Layer):
        def __init__(self):
            super(FuseAttentionBlock, self).__init__()
            self.first_add = tf.keras.layers.Add()
            self.global_avg_pool = tf.keras.layers.GlobalAveragePooling2D()
            self.global_max_pool = tf.keras.layers.GlobalMaxPooling2D()
            self.second_add = tf.keras.layers.Add()
            self.concat = tf.keras.layers.Concatenate(axis=0)
            self.softmax = tf.keras.layers.Softmax(axis=0)
            self.first_multiply = tf.keras.layers.Multiply()
            self.second_multiply = tf.keras.layers.Multiply()
            self.third_add = tf.keras.layers.Add()

        def call(self, f_am, y):
            f_mix = self.first_add([f_am, y])
            x = self.global_avg_pool(f_mix)
            y = self.global_max_pool(f_mix)
            x = self.second_add([x, y])
            for i in range(2):
                x = tf.keras.layers.Dense(32)(x)
                x = tf.keras.layers.BatchNormalization()(x)
                x = tf.keras.layers.Activation('relu')(x)
            m = tf.keras.layers.Dense(f_am.shape[-1])(x)
            n = tf.keras.layers.Dense(y.shape[-1])(x)
            concated = self.concat([m, n], axis=0)
            attention_weights = self.softmax(concated)
            f_am = self.first_multiply([f_am, attention_weights])
            y = self.second_multiply([y, attention_weights])
            f_fm = self.third_add([f_am, y])
            return f_fm

Final architecture

    class MutexAttentionResModel(tf.keras.models.Model):
        def __init__(self, shape, num_classes):
            super(MutexAttentionResModel, self).__init__()
            self.shape = shape
            self.num_classes = num_classes

            input_img = tf.keras.layers.Input(self.shape)
            x = tf.keras.layers.ZeroPadding2D((3, 3))(input_img)
            x = tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same')(x)
            x = tf.keras.layers.BatchNormalization()(x)
            x = tf.keras.layers.Activation('relu')(x)
            x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
            self.first_layer = tf.keras.Model(inputs=input_img, outputs=x)

            input_second_layer = self.first_layer.output
            x = convolutional_block(input_second_layer, 64)
            for i in range(2):
                x = identity_block(x, 64, name=f'2_{i}')
            self.second_layer = tf.keras.Model(inputs=input_second_layer, outputs=x)

            input_third_layer = self.second_layer.output
            x = convolutional_block(input_third_layer, 128)
            for i in range(3):
                x = identity_block(x, 128, name=f'3_{i}')
            self.third_layer = tf.keras.Model(inputs=input_third_layer, outputs=x)

            input_fourth_layer = self.third_layer.output
            x = convolutional_block(input_fourth_layer, 256)
            for i in range(5):
                x = identity_block(x, 256, name=f'4_{i}')
            self.fourth_layer = tf.keras.Model(inputs=input_fourth_layer, outputs=x)

            input_fifth_layer = self.fourth_layer.output
            x = convolutional_block(input_fifth_layer, 512)
            for i in range(2):
                x = identity_block(x, 512, name=f'5_{i}')
            self.fifth_layer = tf.keras.Model(inputs=input_fifth_layer, outputs=x)

            self.first_mutex_attention_block = MutexAttentionBlock()
            self.first_fuse_attention_block = FuseAttentionBlock()
            self.second_mutex_attention_block = MutexAttentionBlock()
            self.second_fuse_attention_block = FuseAttentionBlock()
            self.third_mutex_attention_block = MutexAttentionBlock()
            self.third_fuse_attention_block = FuseAttentionBlock()
            self.fourth_mutex_attention_block = MutexAttentionBlock()
            self.fourth_fuse_attention_block = FuseAttentionBlock()
            self.fifth_mutex_attention_block = MutexAttentionBlock()
            self.fifth_fuse_attention_block = FuseAttentionBlock()

            self.global_max_pool = tf.keras.layers.GlobalMaxPooling2D()
            self.first_dense = tf.keras.layers.Dense(self.num_classes, activation='softmax')
            self.second_dense = tf.keras.layers.Dense(self.num_classes, activation='softmax')

        def call(self, pos_input, neg_input):
            x = self.first_layer(pos_input)
            y = self.first_layer(neg_input)
            f_am = self.first_mutex_attention_block(x, y)
            y = self.first_fuse_attention_block(f_am, y)

            x = self.second_layer(x)
            y = self.second_layer(y)
            f_am = self.second_mutex_attention_block(x, y)
            y = self.second_fuse_attention_block(f_am, y)

            x = self.third_layer(x)
            y = self.third_layer(y)
            f_am = self.third_mutex_attention_block(x, y)
            y = self.third_fuse_attention_block(f_am, y)

            x = self.fourth_layer(x)
            y = self.fourth_layer(y)
            f_am = self.fourth_mutex_attention_block(x, y)
            y = self.fourth_fuse_attention_block(f_am, y)

            x = self.fifth_layer(x)
            y = self.fifth_layer(y)
            f_am = self.fifth_mutex_attention_block(x, y)
            y = self.fifth_fuse_attention_block(f_am, y)

            x_flatten = tf.keras.layers.Flatten(name='x_flatten')(x)
            y_flatten = tf.keras.layers.Flatten(name='y_flatten')(y)
            x = self.global_max_pool(x)
            y = self.global_max_pool(y)
            x = self.first_dense(x)
            y = self.second_dense(y)
            return x, y, x_flatten, y_flatten

Trying to get a summary of the subclassed model

    import matplotlib.pyplot as plt

    model = MutexAttentionResModel(shape=config.IMG_SHAPE, num_classes=2)

    image = plt.imread('architecture\images.jpg')
    image = tf.image.resize(image, [224, 224])
    image = tf.cast(image, dtype=tf.float32)
    image = tf.expand_dims(image, 0)

    output = model(ct_input=image, mutex_input=image)
    print(image.shape)
    model.summary()

I get this error and I don't know what to do about it:

ValueError: The first argument to Layer.call must always be passed.
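(For context, a minimal sketch of one way tf.keras raises this error, assuming TF 2.x: a subclassed model is called only with keyword arguments whose names do not match the first parameter of its call method. The names below are illustrative, not taken from the code above.)

    import tensorflow as tf

    class TwoInputModel(tf.keras.Model):
        # Illustrative model whose call() takes two inputs.
        def __init__(self):
            super().__init__()
            self.dense = tf.keras.layers.Dense(2)

        def call(self, first_input, second_input):
            return self.dense(first_input), self.dense(second_input)

    m = TwoInputModel()
    t = tf.random.normal((1, 4))

    m(t, t)                           # ok: first argument passed positionally
    m(first_input=t, second_input=t)  # ok: keywords match call()'s parameters

    # Raises "ValueError: The first argument to `Layer.call` must always be passed."
    # because no positional input is given and no keyword is named `first_input`.
    m(other_name_a=t, other_name_b=t)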

jchrr9hc (answer #1)

Try this. But first, the reason behind the error: it happens when you use the same name for two layers and pass them to keras.Model. I have changed your code a little bit so that it does not return a Model output at every layer and instead just returns x; it does the same thing you are doing, but with less complexity...
First problem:
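(The answerer's revised code is not shown above. As a rough, hypothetical sketch of the approach described, where each stage just returns a tensor x instead of being wrapped in its own tf.keras.Model, it might look something like this; only the stem stage is shown.)

    class MutexAttentionResModelSketch(tf.keras.Model):
        # Hypothetical restructuring, not the answerer's actual code: the stem is
        # built from shared layer instances and applied via a method that returns
        # a plain tensor, rather than via a nested tf.keras.Model per stage.
        def __init__(self, num_classes):
            super().__init__()
            self.stem_conv = tf.keras.layers.Conv2D(64, 7, strides=2, padding='same')
            self.stem_bn = tf.keras.layers.BatchNormalization()
            self.stem_act = tf.keras.layers.Activation('relu')
            self.stem_pool = tf.keras.layers.MaxPool2D(3, strides=2, padding='same')
            self.global_max_pool = tf.keras.layers.GlobalMaxPooling2D()
            self.classifier = tf.keras.layers.Dense(num_classes, activation='softmax')

        def stem(self, x):
            # Same layer instances for both branches, so weights are shared,
            # but the method simply returns x.
            x = self.stem_conv(x)
            x = self.stem_bn(x)
            x = self.stem_act(x)
            return self.stem_pool(x)

        def call(self, pos_input, neg_input):
            x = self.stem(pos_input)
            y = self.stem(neg_input)
            # ...mutex/fuse attention blocks and the remaining stages go here...
            x = self.classifier(self.global_max_pool(x))
            y = self.classifier(self.global_max_pool(y))
            return x, y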
