This article collects code examples of the Java method water.Job.stop_requested() and shows how Job.stop_requested() is used in practice. The snippets are sourced from platforms such as GitHub, Stack Overflow, and Maven, extracted from curated open-source projects, and should serve as useful real-world references. Details of the Job.stop_requested() method:
Package path: water.Job
Class name: Job
Method name: stop_requested
Description: none available
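Before the collected examples, here is a minimal sketch of the cooperative-cancellation pattern they all share: the task keeps a reference to the Job that drives it, polls stop_requested() once per chunk, and either returns early or throws Job.JobCancelledException. This is an illustrative sketch, not code from the projects below; the class name CancellableTask is hypothetical, while Job, MRTask, Chunk, stop_requested(), update() and JobCancelledException are the actual H2O APIs that appear in the examples.
import water.Job;
import water.MRTask;
import water.fvec.Chunk;

// Hypothetical task showing the stop_requested() polling pattern.
class CancellableTask extends MRTask<CancellableTask> {
  private final Job<?> _job; // the driving job; may be null when run standalone

  CancellableTask(Job<?> job) { _job = job; }

  @Override public void map(Chunk[] chks) {
    // Poll for a stop request before doing any per-chunk work; throwing
    // Job.JobCancelledException aborts the whole distributed task.
    if (_job != null && _job.stop_requested())
      throw new Job.JobCancelledException();
    for (int row = 0; row < chks[0]._len; row++) {
      // ... per-row work would go here ...
    }
    if (_job != null) _job.update(1); // report one unit of progress per chunk
  }
}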
Code example origin: h2oai/h2o-3
@Override public void map( Chunk chks[] ) {
if (isCancelled() || j != null && j.stop_requested()) return;
double tmp [] = new double[_output._names.length];
double preds[] = new double[len];
for(int row = 0; row < chks[0]._len; row++) {
Arrays.fill(preds,0);
double p[] = score_indicator(chks, row, tmp, preds);
for(int c = 0; c < preds.length; c++)
chks[_output._names.length + c].set(row, p[c]);
}
if (j != null) j.update(1);
}
}.doAll(adaptFrm);
Code example origin: h2oai/h2o-3
public boolean progress(double [] beta, double likelihood) {
_state._iter++;
_state.updateState(beta,likelihood);
if(!_parms._lambda_search)
updateProgress(true);
boolean converged = _state.converged();
if(converged) Log.info(LogMsg(_state.convergenceMsg));
return !_job.stop_requested() && !converged && _state._iter < _parms._max_iterations ;
}
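In this progress callback, returning false ends the iteration loop: the solver keeps going only while no stop has been requested, the state has not converged, and _iter is still below _parms._max_iterations.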
Code example origin: h2oai/h2o-3
@Override public void map( Chunk chks[] ) {
if (isCancelled() || j != null && j.stop_requested()) return;
double tmp [] = new double[_output._names.length];
double preds[] = new double[_parms._nv];
for( int row = 0; row < chks[0]._len; row++) {
double p[] = score0(chks, row, tmp, preds);
for( int c=0; c<preds.length; c++ )
chks[_output._names.length+c].set(row, p[c]);
}
if (j !=null) j.update(1);
}
}.doAll(adaptFrm);
Code example origin: h2oai/h2o-3
@Override public void map( Chunk chks[] ) {
if (isCancelled() || j != null && j.stop_requested()) return;
double tmp [] = new double[_output._names.length];
double preds[] = new double[_parms._k];
for( int row = 0; row < chks[0]._len; row++) {
double p[] = score0(chks, row, tmp, preds);
for( int c=0; c<preds.length; c++ )
chks[_output._names.length+c].set(row, p[c]);
}
if (j != null) j.update(1);
}
}.doAll(adaptFrm);
Code example origin: h2oai/h2o-3
@Override public boolean progress(double [] beta, GradientInfo ginfo) {
_state._iter++;
if(ginfo instanceof ProximalGradientInfo) {
ginfo = ((ProximalGradientInfo) ginfo)._origGinfo;
GLMGradientInfo gginfo = (GLMGradientInfo) ginfo;
_state.updateState(beta, gginfo);
if (!_parms._lambda_search)
updateProgress(false);
return !timeout() && !_job.stop_requested() && _state._iter < _parms._max_iterations;
} else {
GLMGradientInfo gginfo = (GLMGradientInfo) ginfo;
if(gginfo._gradient == null)
_state.updateState(beta,gginfo._likelihood);
else
_state.updateState(beta, gginfo);
if (!_parms._lambda_search)
updateProgress(true);
boolean converged = _state.converged();
if (converged) Log.info(LogMsg(_state.convergenceMsg));
return !timeout() && !_job.stop_requested() && !converged && _state._iter < _parms._max_iterations;
}
}
Code example origin: h2oai/h2o-3
@Override public void map(Chunk[] chks) {
if(_job != null && _job.stop_requested()) throw new Job.JobCancelledException();
int numStart = _dinfo.numStart();
int K = _beta[0].length;// number of classes
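This task responds to a stop request by throwing Job.JobCancelledException rather than returning, which aborts the entire MRTask instead of quietly skipping the current chunk.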
Code example origin: h2oai/h2o-3
@Override public void map(Chunk[] chks) {
if( _jobKey.get().stop_requested() ) return;
_nobs = 0;
_rescnt = new int[_nrescat];
Code example origin: h2oai/h2o-3
@Override public void map( Chunk chks[] ) {
if (isCancelled() || job !=null && job.stop_requested()) throw new Job.JobCancelledException();
double tmp [] = new double[len];
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
for( int row=0; row<chks[0]._len; row++ ) {
for( int i=0; i<len; i++ )
tmp[i] = chks[i].atd(row);
((Neurons.Input)neurons[0]).setInput(-1, tmp, mb); //FIXME: No weights yet
DeepLearningTask.fpropMiniBatch(-1, neurons, model_info, null, false, null, null /*no offset*/, n);
double[] out = neurons[layer+1]._a[mb].raw(); //extract the layer-th hidden feature
for( int c=0; c<features; c++ )
chks[_output._names.length+c].set(row,out[c]);
}
if (job != null) job.update(1);
}
}.doAll(adaptFrm);
Code example origin: h2oai/h2o-3
@Override public void map(Chunk[] chks) {
if(_job != null && _job.stop_requested()) throw new Job.JobCancelledException();
chunkInit();
// compute
if(_sparse) {
for(Row r:_dinfo.extractSparseRows(chks)) {
if(!r.isBad() && r.weight != 0)
processRow(r);
}
} else {
Row row = _dinfo.newDenseRow();
for(int r = 0 ; r < chks[0]._len; ++r) {
_dinfo.extractDenseRow(chks, r, row);
if(!row.isBad() && row.weight != 0)
processRow(row);
}
}
chunkDone();
}
}
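Here the stop check sits at the top of map(), once per chunk, before chunkInit() and the per-row processing; rows flagged bad or carrying zero weight are skipped either way.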
Code example origin: h2oai/h2o-3
public void map(Chunk[] chks, NewChunk[] preds) {
if (isCancelled() || _j != null && _j.stop_requested()) return;
if(_m._parms._family == GLMModel.GLMParameters.Family.multinomial ||
_m._parms._family == GLMModel.GLMParameters.Family.ordinal)
Code example origin: h2oai/h2o-3
if (parentJob.stop_requested()) {
userFeedback.info(Stage.ModelTraining, "AutoML job cancelled; skipping " + name);
subJob.stop();
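In this AutoML snippet the check runs against the parent job: once the parent has been asked to stop, the pending sub-job is skipped and explicitly stopped as well.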
Code example origin: h2oai/h2o-3
if (isCancelled() || _j != null && _j.stop_requested()) return;
double weight = weightIdx == -1 ? 1 : _fr.vec(weightIdx).at(i);
if (weight == 0) { //don't send observations with weight 0 to the GPU
if (isCancelled() || _j != null && _j.stop_requested()) return;
float[] data = iter.getData();
float[] predFloats = model_info().predict(data);
Code example origin: h2oai/h2o-3
if(_jobKey != null && _jobKey.get() != null && _jobKey.get().stop_requested()) throw new Job.JobCancelledException();
final int nrows = chunks[0]._len;
final long offset = chunks[0].start();
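When a task holds only a Key to the job, as here and in the earlier _jobKey example, the Job object is first fetched from H2O's distributed key-value store via get() before stop_requested() is polled; the null checks guard against the job no longer existing.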
Code example origin: ai.h2o/h2o-algos (the snippets published under this Maven artifact are identical to the h2oai/h2o-3 examples above and are not repeated here)