ludwig pytorch/master CUDA OOM for kdd appetency w/automl; same config seems to train okay on tensorflow/tf-legacy

j7dteeu8  于 2个月前  发布在  其他
关注(0)|答案(8)|浏览(37)

失败的试验使用了以下配置;同样的配置在相同规格的工作节点上用 TensorFlow 训练似乎没有问题:

| Trial name     | status     | loc                 |   combiner.bn_momentum |   combiner.bn_virtual_bs |   combiner.num_steps |   combiner.output_size |   combiner.relaxation_factor |   combiner.size |   combiner.sparsity |   training.batch_size |   training.decay_rate |   training.decay_steps |   training.learning_rate |   iter |   total time (s) |   metric_score |

| trial_e6687d84 | ERROR      | 172.31.10.68:26084  |                   0.7  |                     2048 |                    4 |                      8 |                          1   |              32 |              0.0001 |                  8192 |                  0.95 |                    500 |                    0.025 |        |                  |                |
7nbnzgx9

7nbnzgx91#

堆栈跟踪(stack trace)如下:

^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m 2022-01-23 08:17:12,441       ERROR function_runner.py:268 -- Runner Thread raised error.
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m Traceback (most recent call last):
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/function_runner.py", line 262, in run
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     self._entrypoint()
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/function_runner.py", line 331, in entrypoint
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     self._status_reporter.get_checkpoint())
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/util/tracing/tracing_helper.py", line 451, in _resume_span
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return method(self, *_args, **_kwargs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/function_runner.py", line 597, in _trainable_func
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     output = fn()
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/utils/trainable.py", line 344, in inner
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     trainable(config, **fn_kwargs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/hyperopt/execution.py", line 699, in run_experiment_trial
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     config, checkpoint_dir, local_hyperopt_dict, self.decode_ctx, _is_ray_backend(backend)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/hyperopt/execution.py", line 568, in _run_experiment
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     _run()
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/hyperopt/execution.py", line 535, in _run
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     parameters=config,
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/hyperopt/execution.py", line 920, in run_experiment
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     debug=debug,
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/api.py", line 1078, in experiment
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     debug=debug,
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/api.py", line 538, in train
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     save_path=model_dir,
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/models/trainer.py", line 741, in train
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     self._train_loop(batcher, progress_tracker, save_path, train_summary_writer, progress_bar)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/models/trainer.py", line 931, in _train_loop
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     targets,
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/models/trainer.py", line 307, in train_step
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     model_outputs = self.model((inputs, targets))
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return forward_call(*input, **kwargs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/models/ecd.py", line 141, in forward
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     combiner_outputs = self.combiner(encoder_outputs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return forward_call(*input, **kwargs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/combiners/combiners.py", line 491, in forward
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     hidden, aggregated_mask, masks = self.tabnet(hidden)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return forward_call(*input, **kwargs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/modules/tabnet_modules.py", line 118, in forward
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     mask_values = self.attentive_transforms[step_i](x[:, self.output_size :], prior_scales)  # [b_s, i_s]
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return forward_call(*input, **kwargs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/modules/tabnet_modules.py", line 252, in forward
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return self.sparsemax(hidden)  # [b_s, s]
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return forward_call(*input, **kwargs)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/utils/torch_utils.py", line 305, in forward
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     return sparsemax(input, self.dim)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/utils/torch_utils.py", line 252, in forward
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     tau, supp_size = SparsemaxFunction._threshold_and_support(input, dim=dim)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m   File "/home/ray/anaconda3/lib/python3.7/site-packages/ludwig/utils/torch_utils.py", line 285, in _threshold_and_support
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m     input_srt, _ = torch.sort(input, descending=True, dim=dim)
^[[2m^[[36m(ImplicitFunc pid=26034, ip=172.31.10.68)^[[0m RuntimeError: CUDA out of memory. Tried to allocate 322.00 MiB (GPU 0; 14.76 GiB total capacity; 13.30 GiB already allocated; 119.75 MiB free; 13.54 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
sulc1iza

sulc1iza2#

请注意,kdd 数据集在 master 分支和 tf-legacy 分支上都需要修复;相应的 PR 已经提交。

qlckcl4x

qlckcl4x3#

问题可以通过运行以下代码来重现:

[https://github.com/ludwig-ai/experiments/blob/main/automl/validation/kdd/run_auto_train_1hr.py](https://github.com/ludwig-ai/experiments/blob/main/automl/validation/kdd/run_auto_train_1hr.py)
z9gpfhce

z9gpfhce4#

在3节点的ray集群上运行,所有节点都是g4dn.4xlarge https://aws.amazon.com/ec2/instance-types/g4/

zfycwa2u

zfycwa2u5#

我能够在使用高亮参数的Torch上复现OOM问题,其批处理大小为8192。然而,我在Tensorflow上也遇到了相同的OOM错误。我在ray上的Torch和Tensorflow上尝试了几个不同的批处理大小,并使用nvidia-smi来监控内存使用情况。

总体来看,我们可以看到在每个批处理大小下,Torch使用的内存比Tensorflow多一点。4096已经非常接近两个分支的内存最大值(15k MiB),所以8192对于任何一个分支来说都太大了,这并不令人惊讶。

@anneholler,我想知道你是否能在你的环境中复现这个发现。如果OOM问题不仅仅局限于torch,那么我认为我们可以关闭这个问题。

yvt65v4c

yvt65v4c6#

Hi, Justin,

Thanks for looking at the problem and for producing the interesting memory use graphs.

Unfortunately, I can still reproduce the problem.
  
To aid repro, I've created a 5min run (ignore the "1hr" in script name) that can be executed on a single node [head-only]
Ray gpu-enabled cluster [g4dn.4xlarge], in which the troublesome config is passed as an initial point to evaluate.
 This script passes on the tf-legacy branch:
  https://github.com/ludwig-ai/experiments/blob/main/automl/validation/kdd/run_auto_train_1hr_repro_tf.py
 This comparable script fails on the master branch:
  https://github.com/ludwig-ai/experiments/blob/main/automl/validation/kdd/run_auto_train_1hr_repro_master.py

One thought is that the problem could be associated with the datatypes inferred for the input features,
which are quite a bit different between master and tf-legacy.
 Latest automl config produced by Master:
  https://github.com/ludwig-ai/experiments/blob/main/automl/validation/kdd/auto_config.json.automl.appetency.master
 Latest automl config produced by tf-legacy:
  https://github.com/ludwig-ai/experiments/blob/main/automl/validation/kdd/auto_config.json.automl.appetency.tf

I'm thinking that in your repro work yesterday, you may have used the master inferred datatypes in your tf-legacy run,
and if the datatypes are the problem, that could be what made your tf-legacy runs fail.
gstyhher

gstyhher7#

感谢提供重现脚本!这对于更深入地研究这个问题确实非常有用。
的确,在我运行的实验中,我为 master 和 tf-legacy 使用了相同的显式指定类型,而不是依赖于每个分支自动推断的类型。
看起来,类型推断的差异确实是问题所在。与 tf-legacy 相比,master 为许多列分配了 text 类型而不是 category 类型,这可能与某次相关改动有关(原文此处为超链接 "this change",链接在转录中丢失)。以下是一些示例:
master :

{'column': 'Var191', 'name': 'Var191', 'type': 'category'},
                    {'column': 'Var192', 'name': 'Var192', 'type': 'text'},
                    {'column': 'Var193', 'name': 'Var193', 'type': 'text'},
                    {'column': 'Var194', 'name': 'Var194', 'type': 'category'},
                    {'column': 'Var195', 'name': 'Var195', 'type': 'text'},
                    {'column': 'Var196', 'name': 'Var196', 'type': 'category'},
                    {'column': 'Var197', 'name': 'Var197', 'type': 'text'},
                    {'column': 'Var198', 'name': 'Var198', 'type': 'text'},
                    {'column': 'Var199', 'name': 'Var199', 'type': 'text'},
                    {'column': 'Var200', 'name': 'Var200', 'type': 'text'},
                    {'column': 'Var201', 'name': 'Var201', 'type': 'category'},
                    {'column': 'Var202', 'name': 'Var202', 'type': 'text'},
                    {'column': 'Var203', 'name': 'Var203', 'type': 'category'},
                    {'column': 'Var204', 'name': 'Var204', 'type': 'text'},
                    {'column': 'Var205', 'name': 'Var205', 'type': 'category'},
                    {'column': 'Var206', 'name': 'Var206', 'type': 'text'},
                    {'column': 'Var207', 'name': 'Var207', 'type': 'category'},
                    {'column': 'Var208', 'name': 'Var208', 'type': 'category'},
                    {'column': 'Var210', 'name': 'Var210', 'type': 'category'},
                    {'column': 'Var211', 'name': 'Var211', 'type': 'category'},
                    {'column': 'Var212', 'name': 'Var212', 'type': 'text'},
                    {'column': 'Var213', 'name': 'Var213', 'type': 'category'},
                    {'column': 'Var214', 'name': 'Var214', 'type': 'text'},
                    {'column': 'Var215', 'name': 'Var215', 'type': 'category'},
                    {'column': 'Var216', 'name': 'Var216', 'type': 'text'},
                    {'column': 'Var217', 'name': 'Var217', 'type': 'text'},
                    {'column': 'Var218', 'name': 'Var218', 'type': 'category'},
                    {'column': 'Var219', 'name': 'Var219', 'type': 'text'},
                    {'column': 'Var220', 'name': 'Var220', 'type': 'text'},
                    {'column': 'Var221', 'name': 'Var221', 'type': 'category'},
                    {'column': 'Var222', 'name': 'Var222', 'type': 'text'},
                    {'column': 'Var223', 'name': 'Var223', 'type': 'category'},
                    {'column': 'Var224', 'name': 'Var224', 'type': 'category'},
                    {'column': 'Var225', 'name': 'Var225', 'type': 'category'},
                    {'column': 'Var226', 'name': 'Var226', 'type': 'text'},
                    {'column': 'Var227', 'name': 'Var227', 'type': 'category'},
                    {'column': 'Var228', 'name': 'Var228', 'type': 'text'},
                    {'column': 'Var229', 'name': 'Var229', 'type': 'category'}

vs. tf-legacy

{'column': 'Var191', 'name': 'Var191', 'type': 'category'},
                    {'column': 'Var192', 'name': 'Var192', 'type': 'category'},
                    {'column': 'Var193', 'name': 'Var193', 'type': 'category'},
                    {'column': 'Var194', 'name': 'Var194', 'type': 'category'},
                    {'column': 'Var195', 'name': 'Var195', 'type': 'category'},
                    {'column': 'Var196', 'name': 'Var196', 'type': 'category'},
                    {'column': 'Var197', 'name': 'Var197', 'type': 'category'},
                    {'column': 'Var198', 'name': 'Var198', 'type': 'category'},
                    {'column': 'Var199', 'name': 'Var199', 'type': 'category'},
                    {'column': 'Var200', 'name': 'Var200', 'type': 'category'},
                    {'column': 'Var201', 'name': 'Var201', 'type': 'category'},
                    {'column': 'Var202', 'name': 'Var202', 'type': 'category'},
                    {'column': 'Var203', 'name': 'Var203', 'type': 'category'},
                    {'column': 'Var204', 'name': 'Var204', 'type': 'category'},
                    {'column': 'Var205', 'name': 'Var205', 'type': 'category'},
                    {'column': 'Var206', 'name': 'Var206', 'type': 'category'},
                    {'column': 'Var207', 'name': 'Var207', 'type': 'category'},
                    {'column': 'Var208', 'name': 'Var208', 'type': 'category'},
                    {'column': 'Var209', 'name': 'Var209', 'type': 'numerical'},
                    {'column': 'Var210', 'name': 'Var210', 'type': 'category'},
                    {'column': 'Var211', 'name': 'Var211', 'type': 'category'},
                    {'column': 'Var212', 'name': 'Var212', 'type': 'category'},
                    {'column': 'Var213', 'name': 'Var213', 'type': 'category'},
                    {'column': 'Var214', 'name': 'Var214', 'type': 'category'},
                    {'column': 'Var215', 'name': 'Var215', 'type': 'category'},
                    {'column': 'Var216', 'name': 'Var216', 'type': 'category'},
                    {'column': 'Var217', 'name': 'Var217', 'type': 'category'},
                    {'column': 'Var218', 'name': 'Var218', 'type': 'category'},
                    {'column': 'Var219', 'name': 'Var219', 'type': 'category'},
                    {'column': 'Var220', 'name': 'Var220', 'type': 'category'},
                    {'column': 'Var221', 'name': 'Var221', 'type': 'category'},
                    {'column': 'Var222', 'name': 'Var222', 'type': 'category'},
                    {'column': 'Var223', 'name': 'Var223', 'type': 'category'},
                    {'column': 'Var224', 'name': 'Var224', 'type': 'category'},
                    {'column': 'Var225', 'name': 'Var225', 'type': 'category'},
                    {'column': 'Var226', 'name': 'Var226', 'type': 'category'},
                    {'column': 'Var227', 'name': 'Var227', 'type': 'category'},
                    {'column': 'Var228', 'name': 'Var228', 'type': 'category'},
                    {'column': 'Var229', 'name': 'Var229', 'type': 'category'},

文本特征的默认编码器是一个 parallel_cnn,它比类别特征的默认单密集层编码器要重得多。不出所料,具有更多文本特征的配置在两个分支上都遇到了内存不足(OOM)的问题。此外,当我为 torch 使用 tf-legacy 的列类型时,torch 不再出现 OOM 错误。 :)
这个谜团现在解开了:当两个分支使用完全相同的配置时,会表现出相同的 OOM/非 OOM 行为;差异仅在于两个分支为各列推断出了不同的类型。
是否应该修正类型推断逻辑,以及/或者将文本特征的默认编码器换成更轻量的类型(如 embed),目前仍是未决问题;无论哪种方式,这样做都可能是有益的。

相关问题