I'm using TensorFlow's quantization-aware training API and want to deploy a model with an arbitrary bit width. Since TFLite deployment only supports 8-bit quantization, I will deploy with a custom inference algorithm, but I still need access to the model's weights at the correct size.
Currently, after quantization-aware training, my model is still floating point. As far as I know, the only way to access the quantized weights is to convert the model to the TFLite format. However, this is impossible when using the experimental functions.
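For reference, that TFLite route looks roughly like this. It is only a sketch, assuming the trained q_aware_model built below, and it only yields 8-bit tensors, which is exactly the limitation here:

import tensorflow as tf

# Convert the QAT model with the default 8-bit path, then read the stored
# tensors back through the TFLite interpreter.
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()

interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
for detail in interpreter.get_tensor_details():
    # `quantization` holds the (scale, zero_point) pair for quantized tensors;
    # the raw values are available via interpreter.get_tensor(detail['index']).
    print(detail['name'], detail['dtype'], detail['quantization'])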
Here is my quantization config class:
import tensorflow as tf
import tensorflow_model_optimization as tfmot

# Settings for the reduced-precision quantizer. bits=4 is the value used below;
# the remaining flags mirror the 8-bit weight quantizer's settings.
bits = 4
symmetric = True
narrow_range = False
per_axis = False
quantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer

class Quantizer(tfmot.quantization.keras.QuantizeConfig):
    # Configure how to quantize weights.
    def get_weights_and_quantizers(self, layer):
        return [(layer.kernel, tfmot.quantization.keras.quantizers.LastValueQuantizer(
            num_bits=8, symmetric=True, narrow_range=False, per_axis=False))]

    # Configure how to quantize activations.
    def get_activations_and_quantizers(self, layer):
        return [(layer.activation, tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
            num_bits=8, symmetric=False, narrow_range=False, per_axis=False))]

    def set_quantize_weights(self, layer, quantize_weights):
        # One assignment per item returned by `get_weights_and_quantizers`, in order.
        layer.kernel = quantize_weights[0]

    def set_quantize_activations(self, layer, quantize_activations):
        # One assignment per item returned by `get_activations_and_quantizers`, in order.
        layer.activation = quantize_activations[0]

    # Configure how to quantize outputs (may be equivalent to activations).
    def get_output_quantizers(self, layer):
        return []

    def get_config(self):
        return {}

class ModifiedQuantizer(Quantizer):
    # Quantize weights with `bits` bits (e.g. 4) instead of 8.
    def get_weights_and_quantizers(self, layer):
        return [(layer.kernel, quantizer(
            num_bits=bits, symmetric=symmetric, narrow_range=narrow_range,
            per_axis=per_axis))]

    # Quantize activations with the same reduced bit width.
    def get_activations_and_quantizers(self, layer):
        return [(layer.activation, tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
            num_bits=bits, symmetric=False, narrow_range=False, per_axis=False))]
And here is how I quantize the model:
supported_layers = [
    tf.keras.layers.Conv2D,
    tf.keras.layers.DepthwiseConv2D,
]
from tensorflow import keras
from tensorflow.keras.models import clone_model
from tensorflow_model_optimization.quantization.keras import (
    quantize_annotate_layer, quantize_apply, quantize_scope)

def quantize_all_layers(layer):
    # Annotate supported layer types with the reduced-precision config;
    # leave everything else untouched.
    for supported_layer in supported_layers:
        if isinstance(layer, supported_layer):
            return quantize_annotate_layer(layer, quantize_config=ModifiedQuantizer())
    return layer

annotated_model = clone_model(model, clone_function=quantize_all_layers)

# `models._relu6` comes from the author's own codebase.
with quantize_scope(
        {'Quantizer': Quantizer},
        {'ModifiedQuantizer': ModifiedQuantizer},
        {'_relu6': models._relu6}):
    q_aware_model = quantize_apply(annotated_model)

optimizer = keras.optimizers.Adam(learning_rate=0.001)
q_aware_model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=optimizer,
    metrics=['sparse_categorical_accuracy'])

# `cifar10.load` is a project-local helper that returns train/val/test splits.
train_images, train_labels, val_images, val_labels, _, _ = cifar10.load()
q_aware_model.fit(train_images, train_labels, batch_size=64, epochs=1, verbose=1,
                  validation_data=(val_images, val_labels))
As mentioned above, when using e.g. bits=4 in the ModifiedQuantizer, the model is still saved in floating point, and I don't know how to access the quantized weights.
Thanks!
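For instance, a quick way to see that the stored kernels are still continuous floats is to count their distinct values; a genuinely 4-bit kernel would hold at most 16. This is a sketch assuming the QAT wrappers expose the wrapped layer as .layer, as Keras wrapper layers do:

import numpy as np

for layer in q_aware_model.layers:
    inner = getattr(layer, 'layer', None)  # QuantizeWrapper wraps the original layer
    if inner is not None and hasattr(inner, 'kernel'):
        kernel = inner.kernel.numpy()
        print(layer.name, kernel.dtype, 'distinct values:', len(np.unique(kernel)))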
1 Answer
I think you can get the quantized weights by calling LastValueQuantizer.__call__ on the given layer's weight tensor. How to call that method is the question. Its current signature is:
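    # Reproduced from tfmot's Quantizer base class; if your tfmot version
    # differs, check quantizers.py in your installation.
    def __call__(self, inputs, training, weights, **kwargs):
        ...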
I assume inputs is the layer's weights, and weights is the value returned by LastValueQuantizer.build. If you can get a reference to the weights returned by build, I would expect it to be straightforward to quantize the layer's weights directly with LastValueQuantizer.__call__.
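To make that concrete, here is a minimal sketch of the idea. It leans on an internal tfmot detail, so treat it as an assumption: QuantizeWrapper keeps its (weight, quantizer, quantizer_vars) triples in the private _weight_vars attribute, and calling the quantizer with training=False applies the learned min/max instead of updating them.

def extract_quantized_kernels(q_aware_model):
    # Assumption: each QuantizeWrapper stores (weight, quantizer, quantizer_vars)
    # tuples in the private `_weight_vars` attribute -- an internal tfmot
    # detail that may change between releases.
    quantized = {}
    for layer in q_aware_model.layers:
        for weight, weight_quantizer, quantizer_vars in getattr(layer, '_weight_vars', []):
            # training=False applies the learned min/max rather than updating them.
            quantized[layer.name] = weight_quantizer(
                weight, training=False, weights=quantizer_vars)
    return quantized

Note that the result is still float32 ("fake-quantized"): with bits=4 each tensor takes at most 16 distinct values. To recover integer codes for a custom inference engine, you would divide by the scale implied by the quantizer's stored min/max variables and round.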