System information
-PaddlePaddle version (e.g. 1.1) or CommitID: 1.8
-GPU (including CUDA/cuDNN version): NVIDIA Tesla V100-PCIE, CUDA 10.0, cuDNN 7
-OS Platform (e.g. Mac OS 10.14): Ubuntu 18.04 (Linux)
-Python version: Python 3.8.3
-API information:
I trained the OCR attention model on GPU with Paddle 1.8, then exported it and ran it with the C++ inference library. When running predict, the following error occurs:
I1223 08:05:24.510879 26118 analysis_predictor.cc:496] ======= optimize end =======
I1223 08:05:24.510918 26118 naive_executor.cc:95] --- skip [feed], feed -> init_scores
I1223 08:05:24.510921 26118 naive_executor.cc:95] --- skip [feed], feed -> init_ids
I1223 08:05:24.510923 26118 naive_executor.cc:95] --- skip [feed], feed -> pixel
I1223 08:05:24.511201 26118 naive_executor.cc:95] --- skip [save_infer_model/scale_0.tmp_0], fetch -> fetch
W1223 08:05:24.511250 26118 device_context.cc:252] Please NOTE: device: 0, CUDA Capability: 70, Driver API Version: 10.2, Runtime API Version: 10.0
W1223 08:05:24.514454 26118 device_context.cc:260] device: 0, cuDNN Version: 7.6.
terminate called after throwing an instance of 'paddle::platform::EnforceNotMet'
what():
C++ Call Stacks (More useful to developers):
0 std::string paddle::platform::GetTraceBackString<std::string const&>(std::string const&, char const*, int)
1 paddle::framework::Tensor::check_memory_size() const
2 paddle::framework::TensorCopy(paddle::framework::Tensor const&, paddle::platform::Place const&, paddle::platform::DeviceContext const&, paddle::framework::Tensor*)
3 paddle::operators::ReadFromArrayOp::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
4 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool)
6 paddle::framework::Executor::RunPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, bool, bool, bool)
7 paddle::operators::WhileOp::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
8 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
9 paddle::framework::NaiveExecutor::Run()
10 paddle::AnalysisPredictor::ZeroCopyRun()
Python Call Stacks (More useful to users):
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/site-packages/paddle/fluid/framework.py", line 2610, in append_op
attrs=kwargs.get("attrs", None))
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op
return self.main_program.current_block().append_op(*args,**kwargs)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/site-packages/paddle/fluid/layers/control_flow.py", line 1794, in array_read
outputs={'Out': [out]})
File "/ssd4/moshuojie/code/ocr_recognition/attention_model.py", line 301, in attention_infer
pre_ids = fluid.layers.array_read(array=ids_array, i=counter)
File "/ssd4/moshuojie/code/ocr_recognition/infer.py", line 67, in inference
ids = infer(images, num_classes, use_cudnn=True if args.use_gpu else False)
File "/ssd4/moshuojie/code/ocr_recognition/infer.py", line 202, in main
inference(args)
File "/ssd4/moshuojie/code/ocr_recognition/infer.py", line 206, in
main()
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
File "/home/mk/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "/home/mk/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 430, in main
run()
File "/home/mk/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/main.py", line 45, in
cli.main()
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/mk/anaconda3/envs/paddle_env/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"main", mod_spec)
Error Message Summary:
Error: Tensor holds no memory. Call Tensor::mutable_data first.
[Hint: holder_ should not be null.] at (/home/george/paddle/paddle/fluid/framework/tensor.cc:23)
[operator < read_from_array > error]
Aborted (core dumped)
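From the traces above, the failing op is read_from_array inside the beam-search While loop: it reads pre_ids from the array seeded by the init_ids feed, and "Tensor holds no memory" means that slot was never allocated, which points at the init_ids/init_scores feeds. For reference, the Python-side feeder in this example (get_attention_feeder_for_infer in the posted code) builds those inputs as LoDTensors roughly as follows; this is only a sketch with a hypothetical helper name, and the two-level LoD is the part a C++ feed must reproduce:

import numpy as np
import paddle.fluid as fluid

def make_init_feeds(batch_size, place):
    """Sketch: build the decoder's initial ids/scores as LoDTensors
    (helper name is hypothetical; values follow sos=0, initial score 1.0)."""
    init_ids_data = np.zeros((batch_size, 1), dtype='int64')
    init_scores_data = np.ones((batch_size, 1), dtype='float32')
    # Two-level LoD: one beam per image, each of length 1.
    recursive_seq_lens = [[1] * batch_size, [1] * batch_size]
    init_ids = fluid.create_lod_tensor(init_ids_data, recursive_seq_lens, place)
    init_scores = fluid.create_lod_tensor(init_scores_data, recursive_seq_lens, place)
    return init_ids, init_scores

If the C++ predictor copies the init_ids data without setting the matching LoD (or copies no data at all), the decoder's first read_from_array can fail exactly as in the log above.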
3 Answers

hrysbysz 1#
Hi! We've received your issue; please be patient while we respond. We will arrange for technicians to answer your question as soon as possible. Please double-check that you have provided a clear problem description, reproduction code, environment & version info, and the error message. You may also consult the official API documentation, FAQ, historical issues, and the AI community for an answer. Have a nice day!
mcvgt66p 2#
Could you please post the complete infer.py code?
hrirmatl 3#
@WenmuZhou infer.py code:
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
# Local modules from the ocr_recognition example are assumed to be imported
# as well: data_reader, ctc_infer, attention_infer, the feeders, and the
# parser / print_arguments / check_gpu utilities.

def inference(args):
    """OCR inference"""
    if args.model == "crnn_ctc":
        infer = ctc_infer
        get_feeder_data = get_ctc_feeder_for_infer
    else:
        infer = attention_infer
        get_feeder_data = get_attention_feeder_for_infer
    eos = 1
    sos = 0
    num_classes = data_reader.num_classes()
    data_shape = data_reader.data_shape()
    # define network
    if len(list(data_shape)) == 3:
        data_shape = [None] + list(data_shape)
    images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
    ids = infer(images, num_classes, use_cudnn=True if args.use_gpu else False)
    # data reader
    infer_reader = data_reader.inference(
        batch_size=args.batch_size,
        infer_images_dir=args.input_images_dir,
        infer_list_file=args.input_images_list,
        cycle=True if args.iterations > 0 else False,
        model=args.model)
    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
def prune(words, sos, eos):
    """Remove unused tokens in prediction result."""
    start_index = 0
    end_index = len(words)
    if sos in words:
        start_index = np.where(words == sos)[0][0] + 1
    if eos in words:
        end_index = np.where(words == eos)[0][0]
    return words[start_index:end_index]
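# For illustration (hypothetical values): with sos=0 and eos=1 as set in
# inference() above, prune drops the leading sos token and everything from
# the eos token onward:
#   prune(np.array([0, 5, 8, 3, 1]), sos=0, eos=1)  ->  array([5, 8, 3])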
def main():
    args = parser.parse_args()
    print_arguments(args)
    check_gpu(args.use_gpu)
    if args.profile:
        if args.use_gpu:
            with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
                inference(args)
        else:
            with profiler.profiler("CPU", sorted_key='total') as cpuprof:
                inference(args)
    else:
        inference(args)
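For completeness, the model run by the C++ predictor would typically have been exported with fluid.io.save_inference_model, listing the same three feeds that the C++ log shows being skipped (pixel, init_ids, init_scores). A minimal sketch, assuming the attention branch of inference() above and a placeholder output directory:

# Minimal export sketch (Paddle 1.8 API). 'ids' is the beam-search output
# returned by attention_infer; the directory name is a placeholder.
exe = fluid.Executor(place)
fluid.io.save_inference_model(
    dirname='./attention_infer_model',
    feeded_var_names=['pixel', 'init_ids', 'init_scores'],
    target_vars=[ids],
    executor=exe)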