Paddle 【预测问题】infer阶段dump数据到文件报错

kmbjn2e3  于 2022-04-21  发布在  Java
关注(0)|答案(2)|浏览(326)

code:
infer_path = config.infer_result_path
model._train_program._fleet_opt["dump_fields"] = ["insid", "click", "pctr", model.predict.name]
model._train_program._fleet_opt["dump_fields_path"] = infer_path
模型是一个二分类ctr预估模型
"insid", "click", "pctr", model.predict.name分别表示用户名、点击label、输入的得分数据和模型预测输出数据
但是dump到文件的数据却是这样

用户名、点击label、输入的得分数据是正确的,但是模型预测输出数据不对
麻烦看下是什么问题?

cwxwcias

cwxwcias1#

是否可以提供下组网呢

b1uwtaje

b1uwtaje2#

是否可以提供下组网呢
这个是组网,是一个三层MLP
import os
import math
import paddle.fluid as fluid
import numpy as np

from sklearn.metrics import precision_score, recall_score

class Model(object):
    """
    Model for ES.

    A three-layer MLP binary-classification CTR model built on the
    paddle.fluid static-graph API. The network is constructed inside
    ``__init__`` under ``program_guard`` so that ``_train_program`` /
    ``_startup_program`` hold the full graph.

    NOTE(review): the original scraped listing had ``definit(self):``
    (a syntax error) and lost the class-body indentation; this version
    restores ``def __init__`` and valid indentation without changing
    any logic, layer names, or parameters.
    """

    def __init__(self):
        """
        Initialize hyper-parameters, create the fluid programs, declare
        the input layers and build the network via ``_base_net``.
        """
        self._dense_feature_dim = 21
        self._fc_layers_size = [20, 10]  # change to es model mlp
        self._dense_param_name = "DenseFeatFactors"
        self._is_bn = False  # set False
        self._train_program = fluid.Program()
        self._startup_program = fluid.Program()
        self._feed_vars = []
        self._fetch_vars = []

        # self.dense_param_attr = fluid.param_attr.ParamAttr(name=self._dense_param_name,
        #     initializer=fluid.initializer.UniformInitializer(low=-0.5, high=0.5))

        with fluid.program_guard(self._train_program, self._startup_program):
            with fluid.unique_name.guard():
                # dense feature vector, one row of 21 float features
                self.dense_input = fluid.layers.data(name="dense_input", \
                        shape=[self._dense_feature_dim], dtype="float32", lod_level=0)
                # click label: 0 / 1
                self.label = fluid.layers.data(name="click", shape=[1], dtype="int64", lod_level=0)
                # es has no ins_weight
                # self.weight_input = fluid.layers.data(name="ins_weight", shape=[1], dtype="float32", lod_level=0)
                self.pctr = fluid.layers.data(name="pctr", shape=[1], dtype="float32", lod_level=0)    # get pctr from dataset

                self._base_net(self.label, self.pctr)

    def _mlp(self, concat, lr_x=1.0):
        """
        Stack the fully-connected layers of the MLP.

        Args:
            concat: input variable fed to the first fc layer.
            lr_x: unused here; kept for interface compatibility with the
                commented-out ParamAttr variant below.

        Returns:
            The output variable of the last fc layer.
        """
        is_bn = self._is_bn
        fc_layers_input = [concat]
        fc_layers_size = self._fc_layers_size
        fc_layers_act = ["relu"] * (len(fc_layers_size))
        # set fc layer name to get parameters
        for i in range(len(fc_layers_size)):
            fc = fluid.layers.fc(
                    input = fc_layers_input[-1],
                    size = fc_layers_size[i],
                    act = fc_layers_act[i],
                    # Normal init scaled by 1/sqrt(fan_in)
                    param_attr = fluid.ParamAttr(initializer=fluid.initializer.Normal(scale = \
                            1 / math.sqrt(fc_layers_input[-1].shape[1]))),
                    name = "fc_layer_" + str(i))
                    #param_attr = fluid.ParamAttr(learning_rate=lr_x, initializer=fluid.initializer.MSRA()))
            if is_bn:
                fc = fluid.layers.batch_norm(input=fc)
            fc_layers_input.append(fc)

        return fc_layers_input[-1]

    def _es_loss(self, pctr, predict, label):
        """
        ES loss function.

        Maps label {0,1} -> {-1,+1}, takes the positive-class column of
        ``predict`` (softmax output, shape [-1, 2]) and penalizes the
        signed gap between ``pctr`` and that score with an exponential:
        loss = exp(-label * (pctr - predict[:, 1])).

        Returns:
            (loss, mapped label, pctr, positive-class slice of predict)
        """
        label = label * 2 - 1
        label = label.astype('float32')
        # pred = pctr - predict
        pred = pctr - fluid.layers.slice(predict, [1], [1], [2])
        exp_ind = label * pred * (-1)
        loss = fluid.layers.exp(exp_ind)
        pred_slice = fluid.layers.slice(predict, [1], [1], [2])

        return loss, label, pctr, pred_slice

    def _base_net(self, label, pctr, lr_x=1.0):
        """
        Build the network body: MLP -> softmax fc head -> ES loss + AUC.

        Populates ``self.predict``, ``self.cost``, ``self.avg_cost``,
        the AUC variables, and the feed/fetch var lists.
        """
        # fcs
        # self.concat_bn = fluid.layers.data_norm(input=self.dense_input, name="common", epsilon=1e-4)

        self.fc_input = self._mlp(self.dense_input)
        # two-class softmax head; model.predict.name is the var dumped by dump_fields
        self.predict = fluid.layers.fc(input=[self.fc_input], size=2, act="softmax", \
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(scale = \
                1 / math.sqrt(self.fc_input.shape[1]))), name="predict")

        # cross_entropy loss
        # self.cost = fluid.layers.cross_entropy(input=self.predict, label=label)
        # es loss
        self.es_loss, self.es_label, self.es_pctr, self.pred_slice = self._es_loss(pctr=pctr, predict=self.predict, label=label)
        self.cost = self.es_loss

        self.avg_cost = fluid.layers.mean(self.cost)

        # print(self.es_loss, self.es_label, self.pctr, self.cost)

        # calculate loss with sigmoid activate and log_loss
        # the last layer must have a layer name to pass infer.py
        # self.fc_input = self._mlp_feed(self.concat_emb)    # feed into sigmoid

        self.auc, self.batch_auc, [self.batch_stat_pos, self.batch_stat_neg, self.stat_pos, self.stat_neg] = \
                fluid.layers.auc(input=self.predict, label=label, curve='ROC')
        # feed vars
        self._feed_vars =  [self.dense_input]
        # fetch vars
        self._fetch_vars = [self.predict]

相关问题