A strange Paddle error

g2ieeal7 · posted on 2022-10-20 in Other

Environment: AI Studio, Advanced edition

Error

TypeError: conv2d(): incompatible function arguments. The following argument types are supported:
    1. (arg0: paddle::imperative::VarBase, arg1: paddle::imperative::VarBase, *args) -> paddle::imperative::VarBase

Invoked with: <paddle.fluid.core_avx.VarBase object at 0x7f04dd29bf70>, name: "conv2d_0.w_0"
type {
  type: LOD_TENSOR
  lod_tensor {
    tensor {
      data_type: FP32
      dims: 32
      dims: 3
      dims: 3
      dims: 3
    }
  }
}
persistable: true
, 'strides', [1, 1], 'paddings', [1, 1], 'dilations', [1, 1], 'groups', 1, 'use_cudnn', True

Code

import paddle
import paddle.fluid as fluid
import numpy as np
from paddle.fluid.dygraph import Conv2D, Pool2D, Linear, BatchNorm
from paddle.fluid.layers import softmax, cross_entropy, mean, accuracy
from paddle.fluid.optimizer import AdamOptimizer
from visualdl import LogWriter

cfg = {
    "batch_size": 128,
    "epoch_num": 20,
    "learning_rate": 1e-4,
    "save_model_dir": "model/MiniVGG"
}

train_reader = paddle.batch(
    fluid.io.shuffle(
        paddle.dataset.cifar.train100(), 5000),
    batch_size=cfg["batch_size"], drop_last=True)

test_reader = paddle.batch(
    fluid.io.shuffle(
        paddle.dataset.cifar.test100(), 5000),
    batch_size=cfg["batch_size"], drop_last=True)

class MiniVGG(fluid.dygraph.Layer):
    def __init__(self):
        super().__init__()
        self.conv_layers = [32, 64, 128]
        self.fc_layers = [1024, 1024]
        # (3, 32, 32)
        self._conv1_1 = Conv2D(num_channels=3, num_filters=self.conv_layers[0], filter_size=3, padding=1)
        self._conv1_2 = Conv2D(num_channels=self.conv_layers[0], num_filters=self.conv_layers[0], filter_size=3, padding=1)
        self._pool1 = Pool2D(pool_size=2, pool_stride=2)

        # (32, 16, 16)
        self._conv2_1 = Conv2D(num_channels=self.conv_layers[0], num_filters=self.conv_layers[1], filter_size=3, padding=1)
        self._conv2_2 = Conv2D(num_channels=self.conv_layers[1], num_filters=self.conv_layers[1], filter_size=3, padding=1)
        self._pool2 = Pool2D(pool_size=2, pool_stride=2)

        # (64, 8, 8)
        self._conv3_1 = Conv2D(num_channels=self.conv_layers[1], num_filters=self.conv_layers[2], filter_size=3, padding=1)
        self._conv3_2 = Conv2D(num_channels=self.conv_layers[2], num_filters=self.conv_layers[2], filter_size=3, padding=1)
        self._conv3_3 = Conv2D(num_channels=self.conv_layers[2], num_filters=self.conv_layers[2], filter_size=3, padding=1)
        self._pool3 = Pool2D(pool_size=2, pool_stride=2)

        # (128, 4, 4)
        self._bn = BatchNorm(self.conv_layers[2])
        self._fc1 = Linear(self.conv_layers[2]*4*4, self.fc_layers[0], act="relu")
        self._fc2 = Linear(self.fc_layers[0], self.fc_layers[1], act="relu")
        self._fc3 = Linear(self.fc_layers[1], 100, act="softmax")

    def forward(self, x):
        x = self._conv1_1(x)
        x = self._conv1_2(x)
        x = self._pool1(x)

        x = self._conv2_1(x)
        x = self._conv2_2(x)
        x = self._pool2(x)

        x = self._conv3_1(x)
        x = self._conv3_2(x)
        x = self._conv3_3(x)
        x = self._pool3(x)

        x = self._bn(x)
        x = fluid.layers.reshape(x, [-1, self.conv_layers[2]*4*4])
        x = self._fc1(x)
        x = self._fc2(x)
        x = self._fc3(x)

        return x

writer = LogWriter("log")

vdl_count1 = 0
vdl_count2 = 0
vgg = MiniVGG()    # model (constructed here, before the dygraph guard below)
with fluid.dygraph.guard(fluid.CUDAPlace(0)):
    for epoch in range(cfg["epoch_num"]):
        for batch_id, data in enumerate(train_reader()):
            # prepare the batch data (define)
            img = fluid.dygraph.to_variable(
                np.array(
                    [x[0].reshape(3, 32, 32) for x in data]
                ).astype(np.float32)
            )
            label = fluid.dygraph.to_variable(
                np.array(
                    [y[1] for y in data]
                ).astype(np.int64).reshape(-1, 1)
            )
            # forward pass (forward)
            pred = vgg(img)
            pred = fluid.layers.reshape(pred, [-1, 100])
            # loss
            loss = cross_entropy(pred, label)
            loss = fluid.layers.mean(loss)
            loss.backward()
            acc = fluid.layers.accuracy(pred, label)
            # optimization step (backward)
            adam = AdamOptimizer(    # optimizer (re-created on every batch)
                learning_rate=cfg["learning_rate"], 
                parameter_list=vgg.parameters())  
            adam.minimize(loss)    
            vgg.clear_gradients()
            if batch_id % 50 == 0:
                print("Epoch {0}   Batch {1}   Loss{2}   Acc{3}".format(
                    epoch, batch_id, loss.numpy(), acc.numpy()))
                writer.add_scalar(tag="Train/loss", step=vdl_count1, value=loss.numpy()[0])
                writer.add_scalar(tag="Train/acc", step=vdl_count1, value=acc.numpy()[0])
                vdl_count1 += 1

        # evaluation
        loss_list = list()
        acc_list = list()         
        for batch_id, data in enumerate(test_reader()):
            img = fluid.dygraph.to_variable(
                np.array(
                    [x[0].reshape(3, 32, 32) for x in data]
                ).astype(np.float32))
            label = fluid.dygraph.to_variable(
                np.array(
                    [y[1] for y in data]
                ).astype(np.int64).reshape(-1, 1))
            # forward
            pred = vgg(img)
            # loss
            loss = cross_entropy(pred, label)
            loss = mean(loss)
            acc = accuracy(pred, label)
            if batch_id % 10 == 0:
                writer.add_scalar(tag="Test/loss", step=vdl_count2, value=loss.numpy()[0])
                writer.add_scalar(tag="Test/acc", step=vdl_count2, value=acc.numpy()[0])
                vdl_count2 += 1
            # record loss and acc
            loss_list.append(loss.numpy()[0])
            acc_list.append(acc.numpy()[0])

        print("Eval ———— Loss {}   Acc {}\n".format(
            float(np.array(loss_list).mean()), float(np.array(acc_list).mean())))

    state_dict = vgg.state_dict()
    fluid.save_dygraph(state_dict, cfg["save_model_dir"])
    print("Save dygraph succeed!")

Workaround

Simply re-running the failing code makes the error go away. So the cause of the error is, surprisingly, just "an error"? Unbelievable. Could some RD engineer explain what is going on here?
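
For reference, the usual pattern in Paddle 1.8's dygraph mode is to construct the model (and the optimizer) inside fluid.dygraph.guard(), so that all parameters are created as imperative VarBase objects. In the code above, vgg = MiniVGG() runs before the guard is entered; whether that is connected to this error is only an assumption, not something confirmed in this thread. A minimal sketch of the rearranged skeleton, reusing MiniVGG, cfg and train_reader from the code above, would be:

import paddle.fluid as fluid
from paddle.fluid.optimizer import AdamOptimizer

with fluid.dygraph.guard(fluid.CUDAPlace(0)):
    # Build the Layer inside the guard so its weights are created as
    # imperative VarBase objects rather than static-graph parameters.
    vgg = MiniVGG()
    # Create the optimizer once, outside the batch loop, so Adam's
    # moment estimates are not reset on every step.
    adam = AdamOptimizer(learning_rate=cfg["learning_rate"],
                         parameter_list=vgg.parameters())
    for epoch in range(cfg["epoch_num"]):
        for batch_id, data in enumerate(train_reader()):
            # ... same forward / loss / backward code as above ...
            pass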


xlpyo6sf1#

Version info:

aistudio@jupyter-212554-995200:~$ pip show paddlepaddle-gpu
Name: paddlepaddle-gpu
Version: 1.8.4.post97
Summary: Parallel Distributed Deep Learning
Home-page: UNKNOWN
Author: UNKNOWN
Author-email: UNKNOWN
License: UNKNOWN
Location: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages
Requires: nltk, matplotlib, prettytable, rarfile, requests, graphviz, opencv-python, pyyaml, astor, scipy, gast, protobuf, six, numpy, decorator, pathlib, objgraph, Pillow, funcsigs
Required-by:


ua4mk5z42#

What do you mean by "the cause of the error turned out to be 'an error'"? Also, could you paste the log showing which Python line the error occurs on?


qvsjd97n3#

What do you mean by "the cause of the error turned out to be 'an error'"? Also, could you paste the log showing which Python line the error occurs on?

Here is the exact error. It appears as soon as the code above is run (I could not grab a GPU this time, so this run used the CPU):

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-5-74784aa7b3d7> in <module>
     64             )
     65             # 前向计算(forward)
---> 66             pred = net(img)
     67             pred = fluid.layers.reshape(pred, [-1, 10])
     68             # 损失函数(loss)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py in __call__(self, *inputs,**kwargs)
    459 
    460         with param_guard(self._parameters):
--> 461             outputs = self.forward(*inputs,**kwargs)
    462 
    463         for forward_post_hook in self._forward_post_hooks.values():
<ipython-input-5-74784aa7b3d7> in forward(self, x)
     27 
     28     def forward(self, x):
---> 29         x = self._conv1_1(x)
     30         x = self._conv1_2(x)
     31         x = self._pool1(x)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py in __call__(self, *inputs,**kwargs)
    459 
    460         with param_guard(self._parameters):
--> 461             outputs = self.forward(*inputs,**kwargs)
    462 
    463         for forward_post_hook in self._forward_post_hooks.values():
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/nn.py in forward(self, input)
    228                      'dilations', self._dilation, 'groups', self._groups
    229                      if self._groups else 1, 'use_cudnn', self._use_cudnn)
--> 230             out = core.ops.conv2d(input, self.weight, *attrs)
    231             pre_bias = out
    232 
TypeError: conv2d(): incompatible function arguments. The following argument types are supported:
    1. (arg0: paddle::imperative::VarBase, arg1: paddle::imperative::VarBase, *args) -> paddle::imperative::VarBase

Invoked with: <paddle.fluid.core_avx.VarBase object at 0x7f69c60972f0>, name: "conv2d_0.w_0"
type {
  type: LOD_TENSOR
  lod_tensor {
    tensor {
      data_type: FP32
      dims: 32
      dims: 1
      dims: 3
      dims: 3
    }
  }
}
persistable: true
, 'strides', [1, 1], 'paddings', [1, 1], 'dilations', [1, 1], 'groups', 1, 'use_cudnn', True

vltsax254#

Strange. I ran exactly the same code you posted, and on my side it runs fine.

However, my Paddle version is develop (the development build, so it is the latest), and my feeling is that some Paddle bug may already have been fixed there. Could you check whether you can update Paddle to a newer version?
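
If it helps, the installed version can be checked directly from Python. The snippet below is a generic check, not something specific to this thread or to AI Studio:

import paddle

# Prints the installed PaddlePaddle version string, e.g. "1.8.4".
print(paddle.__version__)

# Upgrading, if the environment allows it, is typically:
#   pip install --upgrade paddlepaddle-gpu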


aelbi1ox5#

My Paddle version is 1.8.4. By develop, do you mean you ran it with the latest 2.0?
This is my first attempt at training a model with dygraph (dynamic graph) mode, and it is very strange to run into this problem.
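
As a side note on the 1.8 vs 2.x question: in Paddle 2.x the layers live under paddle.nn and dynamic graph mode is enabled by default, so no dygraph.guard() is needed. The sketch below shows only the first conv block of the model rewritten against the 2.x API; the class name MiniVGGHead is made up for illustration and is not code from this thread.

import paddle
import paddle.nn as nn

class MiniVGGHead(nn.Layer):
    def __init__(self):
        super().__init__()
        # Paddle 2.x naming: in_channels / out_channels / kernel_size
        self.conv1_1 = nn.Conv2D(in_channels=3, out_channels=32,
                                 kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2D(kernel_size=2, stride=2)

    def forward(self, x):
        x = self.conv1_1(x)
        return self.pool1(x)

net = MiniVGGHead()
x = paddle.randn([4, 3, 32, 32], dtype="float32")
print(net(x).shape)   # expected: [4, 32, 16, 16]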
