This article collects code examples of the Java method water.fvec.Frame.byteSize() and shows how Frame.byteSize() is used in practice. The examples were extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful reference material. Details of the Frame.byteSize() method:
Package path: water.fvec.Frame
Class name: Frame
Method name: byteSize
Description: none available
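Before the collected examples, a minimal sketch of the call itself may help. byteSize() returns the compressed, in-memory size of the Frame's data in bytes, summed over its Vecs. This sketch assumes an h2o-3 JVM with the water.TestUtil test helper on the classpath; parse_test_file and the CSV path are illustrative assumptions, not part of the byteSize() API:

import water.H2O;
import water.TestUtil;
import water.fvec.Frame;
import water.util.PrettyPrint;

public class ByteSizeDemo {
  public static void main(String[] args) {
    H2O.main(args); // boot a single-node H2O cloud in this JVM
    // Hypothetical loader: any code that parses data into a Frame works here.
    Frame fr = TestUtil.parse_test_file("smalldata/iris/iris.csv");
    try {
      long bytes = fr.byteSize(); // compressed in-memory size of all Vecs, in bytes
      System.out.println("Frame: " + fr.numRows() + " rows, " + fr.numCols()
          + " cols, " + PrettyPrint.bytes(bytes));
    } finally {
      fr.delete(); // free the Frame's backing Vecs
    }
  }
}

As the examples below show, the typical use of this value is capacity planning: comparing the Frame's footprint against available memory or a size threshold before choosing an execution strategy.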
Code example source: h2oai/h2o-3 (also published to Maven as ai.h2o/h2o-algos) — deciding how many cross-validation models to build in parallel:
@Override protected int nModelsInParallel() {
  if (!_parms._parallelize_cross_validation || _parms._max_runtime_secs != 0) return 1; // user demands serial building (or we need to honor the time constraints for all CV models equally)
  if (_train.byteSize() < 1e6) return _parms._nfolds; // for small data, parallelize over CV models
  return 1;
}
Code example source: h2oai/h2o-3 (also published to Maven as ai.h2o/h2o-algos) — the GBM variant of the same decision:
@Override protected int nModelsInParallel() {
  if (!_parms._parallelize_cross_validation || _parms._max_runtime_secs != 0) return 1; // user demands serial building (or we need to honor the time constraints for all CV models equally)
  if (_train.byteSize() < 1e6) return _parms._nfolds; // for small data, parallelize over CV models
  return 2; // GBM always has some serial work, so it's fine to build two models at once
}
Code example source: h2oai/h2o-2 — reporting a value's length, using byteSize() when the value is a Frame:
public long length() {
  if (isFrame()) {
    return ((Frame) get()).byteSize();
  }
  return _max;
}
Code example source: h2oai/h2o-2 — consistency checks over chunk counts and total byte size:
// Fragment of a sanity check: accumulate chunk counts per Vec, then verify
// the totals against the Frame's byteSize().
check += v.nChunks();
assert (total_chunk_count == check);
assert (total_chunk_byte_size == _fr.byteSize());
Code example source: h2oai/h2o-2 — defaulting to local execution when the dataset fits in local memory:
@Override protected Boolean defaultValue() {
  // Can we allocate ALL of the dataset locally?
  long bs = fr().byteSize();
  if( !MemoryManager.tryReserveTaskMem(bs) ) return false;
  // Also, do we have enough chunks to run it well globally?
  if( fr().anyVec().nChunks() >= 2*H2O.CLOUD.size() ) return false;
  // Less than 2 chunks per node, and fits locally... default to local-only
  return true;
}
Code example source: h2oai/h2o-2 — building a summary response that includes the Frame's byte size:
@Override protected Response serve() {
  if( src_key == null ) return RequestServer._http404.serve();
  numRows = src_key.numRows();
  numCols = src_key.numCols();
  Futures fs = new Futures();
  for( int i=0; i<numCols; i++ )
    src_key.vecs()[i].rollupStats(fs);
  fs.blockForPending();
  byteSize = src_key.byteSize();
  cols = new ColSummary[numCols];
  for( int i=0; i<cols.length; i++ )
    cols[i] = new ColSummary(src_key._names[i], src_key.vecs()[i]);
  return Response.done(this);
}
Code example source: h2oai/h2o-2 — checking whether every node has enough free memory to hold the non-local chunks:
private boolean canLoadAll(final Frame fr, ChunkAllocInfo cai) {
  int nchks = fr.anyVec().nChunks();
  long localBytes = 0L;
  for (int i = 0; i < nchks; ++i) {
    Key k = fr.anyVec().chunkKey(i);
    if (k.home()) {
      localBytes += fr.anyVec().chunkForChunkIdx(i).byteSize();
    }
  }
  long memForNonLocal = fr.byteSize() - localBytes;
  // Also must add in the RF internal data structure overhead
  memForNonLocal += fr.numRows() * fr.numCols();
  for (int i = 0; i < H2O.CLOUD._memary.length; i++) {
    HeartBeat hb = H2O.CLOUD._memary[i]._heartbeat;
    long nodeFreeMemory = (long) (hb.get_max_mem() * 0.8); // * OVERHEAD_MAGIC;
    Log.debug(Log.Tag.Sys.RANDF, i + ": computed available mem: " + PrettyPrint.bytes(nodeFreeMemory));
    Log.debug(Log.Tag.Sys.RANDF, i + ": remote chunks require: " + PrettyPrint.bytes(memForNonLocal));
    if (nodeFreeMemory - memForNonLocal <= 0 || (nodeFreeMemory <= TWO_HUNDRED_MB && memForNonLocal >= ONE_FIFTY_MB)) {
      Log.info("Node free memory raw: " + nodeFreeMemory);
      cai.node = H2O.CLOUD._memary[i];
      cai.availableMemory = nodeFreeMemory;
      cai.requiredMemory = memForNonLocal;
      return false;
    }
  }
  return true;
}
Code example source: h2oai/h2o-3 (also published to Maven as ai.h2o/h2o-algos) — Deep Learning parameter validation:
// Fragment from parameter validation: fail fast when the compressed training
// Frame would exceed ~90% of average free memory per node.
dl.error("_class_sampling_factors", "class_sampling_factors requires balance_classes to be enabled.");
if (_replicate_training_data && null != train() && train().byteSize() > 0.9*H2O.CLOUD.free_mem()/H2O.CLOUD.size() && H2O.CLOUD.size() > 1) {
  dl.error("_replicate_training_data", "Compressed training dataset takes more than 90% of avg. free available memory per node (" + 0.9*H2O.CLOUD.free_mem()/H2O.CLOUD.size() + "), cannot run with replicate_training_data.");
Code example source: h2oai/h2o-3 (also published to Maven as ai.h2o/h2o-algos) — estimating the size of the training data:
// Fragment estimating training-data size: image data uses raw pixel bytes
// (width * height * channels * 4 bytes per float); otherwise fall back to
// the Frame's compressed byteSize().
bytes = train.numRows() * model.model_info()._width * model.model_info()._height * model.model_info()._channels * 4;
} else {
  bytes = train.byteSize();