Paddle fill_constant error

e0uiprwp · asked 2021-11-30

Error message:

terminate called after throwing an instance of 'paddle::platform::EnforceNotMet'
  what():  Invoke operator fill_constant error.
Python Callstacks:
  File "/home/work/zhaoyijin/disk2-zhaoyijin/ffm/paddle_release_home/python/lib/python2.7/site-packages/paddle/fluid/framework.py", line 1771, in append_op
    attrs=kwargs.get("attrs", None))
  File "/home/work/zhaoyijin/disk2-zhaoyijin/ffm/paddle_release_home/python/lib/python2.7/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op
    return self.main_program.current_block().append_op(*args,**kwargs)
  File "/home/work/zhaoyijin/disk2-zhaoyijin/ffm/paddle_release_home/python/lib/python2.7/site-packages/paddle/fluid/layers/tensor.py", line 412, in fill_constant
    stop_gradient=True)
  File "/home/disk2/zhaoyijin/ffm/model_train/network_conf_new.py", line 119, in ffm_model
    context_user_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
  File "local_train.py", line 31, in train
    loss, auc, data_list = ffm_model(args.embedding_size, dict_size)
  File "local_train.py", line 72, in <module>
    train()
C++ Callstacks:
Enforce failed. Expected numel() >= 0, but received numel():-10 < 0:0.
When calling this method, the Tensor's numel must be equal or larger than zero. Please check Tensor::Resize has been called first. at [/paddle/paddle/fluid/framework/tensor.cc:43]
PaddlePaddle Call Stacks:
0       0x7f10f4d90bf8p void paddle::platform::EnforceNotMet::Init<std::string>(std::string, char const*, int) + 360
1       0x7f10f4d90f47p paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int) + 87
2       0x7f10f61148a8p paddle::framework::Tensor::mutable_data(boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_>, paddle::framework::proto::VarType_Type, unsigned long) + 776
3       0x7f10f50a775ep paddle::operators::FillConstantKernel<float>::Compute(paddle::framework::ExecutionContext const&) const + 494
4       0x7f10f50a78b3p std::_Function_handler<void (paddle::framework::ExecutionContext const&), paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CPUPlace, false, 0ul, paddle::operators::FillConstantKernel<float>, paddle::operators::FillConstantKernel<double>, paddle::operators::FillConstantKernel<long>, paddle::operators::FillConstantKernel<int>, paddle::operators::FillConstantKernel<paddle::platform::float16> >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&) + 35
5       0x7f10f60d9627p paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&, paddle::framework::RuntimeContext*) const + 375
6       0x7f10f60d9d91p paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) const + 529
7       0x7f10f60d7c3bp paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) + 267
8       0x7f10f4f3b366p paddle::framework::HogwildWorker::TrainFiles() + 278
9       0x7f11059278a0p
10      0x7f12099071c3p
11      0x7f1208f2f12dp clone + 109

code:

def ffm_model(embedding_size, dict_size):
    input_list = create_input_variable()
    label = fluid.layers.data(
            name='label', shape=[1], dtype='float32')

    dict_emb = create_emb(embedding_size, input_list, dict_size)  # feature_name : {field: emb}

    # sum-pool the embeddings that belong to the same field
    context_user_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
    context_doc_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
    user_context_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
    user_doc_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
    doc_context_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
    doc_user_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
    # context
    for feature_name in global_def.CONTEXT_FEATURE_LIST:
        print dict_emb[feature_name]['user']
        context_user_emb += dict_emb[feature_name]['user']
        context_doc_emb += dict_emb[feature_name]['doc']
    # user
    for feature_name in global_def.USER_FEATURE_LIST:
        user_context_emb += dict_emb[feature_name]['context']
        user_doc_emb += dict_emb[feature_name]['doc']
    # doc
    for feature_name in global_def.DOC_FEATURE_LIST:
        doc_context_emb += dict_emb[feature_name]['context']
        doc_user_emb += dict_emb[feature_name]['user']

    field_cross_interaction = fluid.layers.reduce_sum(user_doc_emb * doc_user_emb \
            + user_context_emb * context_user_emb \
            + doc_context_emb * context_doc_emb, dim=1, keep_dim=True)

    bias = fluid.layers.create_parameter(shape=[1], dtype="float32", name='bias')

    predict = fluid.layers.sigmoid(field_cross_interaction + bias)

    cost = fluid.layers.log_loss(input=predict, label=label)
    batch_cost = fluid.layers.reduce_sum(cost)

    # for auc
    predict_2d = fluid.layers.concat([1 - predict, predict], 1)
    label_int = fluid.layers.cast(label, 'int64')
    auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict_2d,
                                                          label=label_int,
                                                          slide_steps=0)
    city_code, device_info, refresh_time, \
        age, gender, educational, \
        nid, mthid, manual_tags, cate_v2, sub_cate_v2 = input_list

    return batch_cost, auc_var, [city_code, device_info, refresh_time, \
                age, gender, educational, \
                nid, mthid, manual_tags, cate_v2, sub_cate_v2, \
                label]

def create_emb(emb_size, input_list, dict_size):
    city_code, device_info, refresh_time, \
        age, gender, educational, \
        nid, mthid, manual_tags, cate_v2, sub_cate_v2 = input_list

    dict_emb = {} # feature_name : {field: emb}

    for feature_name in global_def.CONTEXT_FEATURE_LIST:
        feature_dict_dize = dict_size[feature_name]
        user_emb = fluid.layers.embedding(
                input=eval(feature_name), size=[feature_dict_dize, emb_size],
                param_attr=fluid.ParamAttr(name="_proj_" + feature_name,
                    initializer=fluid.initializer.Normal(scale=1/math.sqrt(feature_dict_dize))),
                is_sparse=False, is_distributed=False)
        doc_emb = fluid.layers.embedding(
                input=eval(feature_name), size=[feature_dict_dize, emb_size],
                param_attr=fluid.ParamAttr(name="_proj_" + feature_name,
                    initializer=fluid.initializer.Normal(scale=1/math.sqrt(feature_dict_dize))),
                is_sparse=False, is_distributed=False)
        dict_emb[feature_name] = {}
        dict_emb[feature_name]['user'] = user_emb
        dict_emb[feature_name]['doc'] = doc_emb

    for feature_name in global_def.USER_FEATURE_LIST:
        feature_dict_dize = dict_size[feature_name]
        context_emb = fluid.layers.embedding(
                input=eval(feature_name), size=[feature_dict_dize, emb_size],
                param_attr=fluid.ParamAttr(name="_proj_" + feature_name,
                    initializer=fluid.initializer.Normal(scale=1/math.sqrt(feature_dict_dize))),
                is_sparse=False, is_distributed=False)
        doc_emb = fluid.layers.embedding(
                input=eval(feature_name), size=[feature_dict_dize, emb_size],
                param_attr=fluid.ParamAttr(name="_proj_" + feature_name,
                    initializer=fluid.initializer.Normal(scale=1/math.sqrt(feature_dict_dize))),
                is_sparse=False, is_distributed=False)
        dict_emb[feature_name] = {}
        dict_emb[feature_name]['context'] = context_emb
        dict_emb[feature_name]['doc'] = doc_emb

    for feature_name in global_def.DOC_FEATURE_LIST:
        feature_dict_dize = dict_size[feature_name]
        context_emb = fluid.layers.embedding(
                input=eval(feature_name), size=[feature_dict_dize, emb_size],
                param_attr=fluid.ParamAttr(name="_proj_" + feature_name,
                    initializer=fluid.initializer.Normal(scale=1/math.sqrt(feature_dict_dize))),
                is_sparse=False, is_distributed=False)
        user_emb = fluid.layers.embedding(
                input=eval(feature_name), size=[feature_dict_dize, emb_size],
                param_attr=fluid.ParamAttr(name="_proj_" + feature_name,
                    initializer=fluid.initializer.Normal(scale=1/math.sqrt(feature_dict_dize))),
                is_sparse=False, is_distributed=False)
        if feature_name in ['manual_tags', 'sub_cate_v2']:
            context_emb_avg = fluid.layers.sequence_pool(input=context_emb, pool_type='average')
            user_emb_avg = fluid.layers.sequence_pool(input=user_emb, pool_type='average')
            dict_emb[feature_name] = {}
            dict_emb[feature_name]['context'] = context_emb_avg
            dict_emb[feature_name]['user'] = user_emb_avg
        else:
            dict_emb[feature_name] = {}
            dict_emb[feature_name]['context'] = context_emb
            dict_emb[feature_name]['user'] = user_emb
    return dict_emb

Code notes: the model implements an FFM, and the idea is to sum-pool the embeddings that belong to the same field. Because the loops accumulate with context_user_emb += ..., the accumulators such as context_user_emb have to be defined before the loops, and the error most likely comes from that definition:

context_user_emb = fluid.layers.fill_constant(shape=[-1, embedding_size], dtype='float32', value=0.0)
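For what it's worth, the same check seems to trip in isolation as well (a minimal sketch, assuming the fluid 1.x / CPU setup from the traceback; the executor lines are only there to reproduce):

import paddle.fluid as fluid

# fill_constant takes the shape attribute literally, so a -1 entry gives a
# negative numel (-1 * 10 = -10, matching the EnforceNotMet message above).
x = fluid.layers.fill_constant(shape=[-1, 10], dtype='float32', value=0.0)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
exe.run(fluid.default_main_program(), fetch_list=[x])  # raises EnforceNotMet: numel():-10 < 0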

kyxcudwk1#

Suggest using fill_constant_batch_size_like. fill_constant takes its shape literally, so the -1 produces a negative numel; fill_constant_batch_size_like instead copies the batch size from an existing tensor (its input argument) into the first entry of shape at run time: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/layers_cn/tensor_cn.html#fill-constant-batch-size-like
context_user_emb = fluid.layers.fill_constant_batch_size_like(input=like, shape=[1, embedding_size], dtype='float32', value=0.0)
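A concrete way to apply this inside ffm_model (a sketch; ref_emb is just an illustrative name for any tensor that already carries the right batch size, e.g. one of the embeddings returned by create_emb):

# Use an existing embedding as the reference whose batch size is copied.
ref_emb = dict_emb[global_def.CONTEXT_FEATURE_LIST[0]]['user']

# The leading 1 in `shape` is replaced by ref_emb's batch size at run time,
# so each accumulator starts as a [batch_size, embedding_size] tensor of zeros.
context_user_emb = fluid.layers.fill_constant_batch_size_like(
        input=ref_emb, shape=[1, embedding_size], dtype='float32', value=0.0)
context_doc_emb = fluid.layers.fill_constant_batch_size_like(
        input=ref_emb, shape=[1, embedding_size], dtype='float32', value=0.0)
# ... and the same for the other four accumulators.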


mdfafbf12#

In the end I changed the definitions as follows, and the error went away.

    context_user_emb = None
    context_doc_emb = None
    user_context_emb = None
    user_doc_emb = None
    doc_context_emb = None
    doc_user_emb = None
    # context
    for feature_name in global_def.CONTEXT_FEATURE_LIST:
        if context_user_emb is None:
            context_user_emb = dict_emb[feature_name]['user']
        else:
            context_user_emb += dict_emb[feature_name]['user']
        if context_doc_emb is None:
            context_doc_emb = dict_emb[feature_name]['doc']
        else:
            context_doc_emb += dict_emb[feature_name]['doc']
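The same None-initialization is repeated for the user and doc loops. An equivalent, more compact variant (a sketch, not part of the original fix; sum_field is a hypothetical helper) is to build each accumulator with fluid.layers.sums over a list comprehension, so no zero tensor is needed at all:

    def sum_field(feature_list, field):
        # Elementwise sum of the chosen field's embedding for every feature in the list.
        return fluid.layers.sums([dict_emb[name][field] for name in feature_list])

    context_user_emb = sum_field(global_def.CONTEXT_FEATURE_LIST, 'user')
    context_doc_emb = sum_field(global_def.CONTEXT_FEATURE_LIST, 'doc')
    user_context_emb = sum_field(global_def.USER_FEATURE_LIST, 'context')
    user_doc_emb = sum_field(global_def.USER_FEATURE_LIST, 'doc')
    doc_context_emb = sum_field(global_def.DOC_FEATURE_LIST, 'context')
    doc_user_emb = sum_field(global_def.DOC_FEATURE_LIST, 'user')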
