This article collects code examples of the Java method water.Job.update() and shows how Job.update() is used in practice. The examples are extracted from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they are reasonably representative and should be useful references. Details of the Job.update() method are as follows:

Package: water.Job
Class: Job
Method: update

No method documentation is available.
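Since the snippets below all revolve around the same two call patterns, here is a minimal sketch up front: update(long units) advances the job's progress bar by a number of completed work units, and update(long units, String msg) additionally refreshes the progress message. The task name ProgressDemo and the surrounding setup are illustrative assumptions, not part of the h2o-3 API.

import water.Job;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;

// Hypothetical MRTask that reports progress chunk by chunk; it assumes the
// enclosing Job was started with a total work count equal to the number of
// chunks, so each map() call accounts for exactly one unit of work.
class ProgressDemo extends MRTask<ProgressDemo> {
  private final Job<Frame> _job;
  ProgressDemo(Job<Frame> job) { _job = job; }

  @Override public void map(Chunk c) {
    // ... real per-chunk work would go here ...
    _job.update(1); // one chunk done; the progress bar moves forward
  }
}

As the examples below show, the message-bearing overload is typically used for coarse phase changes, e.g. _job.update(0, "Initializing model training") to refresh only the message, or _job.update(1, "Running iteration 0") to log a phase and count work at the same time.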
Code example source: h2oai/h2o-3

@Override public void map( Chunk chk ) {
  _matches = new String[1]; // Result holders; will lazily expand
  _offsets = new long  [1];
  ByteSeq bs = new ByteSeq(chk, chk.nextChunk());
  // We already checked that this is an instance of a ByteVec, which means
  // all the Chunks contain raw text as byte arrays.
  Matcher m = _pattern.matcher(bs);
  while( m.find() && m.start() < bs._bs0.length )
    add(bs.str(m.start(), m.end()), chk.start() + m.start());
  _job.update(chk._len); // Whole chunk of work, done all at once
}
@Override public void reduce( GrepGrep gg1 ) {
  // ... (snippet truncated at the source)
Code example source: h2oai/h2o-3

  @Override public void map( Chunk chks[] ) {
    if (isCancelled() || j != null && j.stop_requested()) return;
    double tmp  [] = new double[_output._names.length];
    double preds[] = new double[len];
    for (int row = 0; row < chks[0]._len; row++) {
      Arrays.fill(preds, 0);
      double p[] = score_indicator(chks, row, tmp, preds);
      for (int c = 0; c < preds.length; c++)
        chks[_output._names.length + c].set(row, p[c]);
    }
    if (j != null) j.update(1); // one chunk of rows scored
  }
}.doAll(adaptFrm);
Code example source: h2oai/h2o-3

  @Override public void map( Chunk chks[] ) {
    if (isCancelled() || j != null && j.stop_requested()) return;
    double tmp  [] = new double[_output._names.length];
    double preds[] = new double[_parms._nv];
    for (int row = 0; row < chks[0]._len; row++) {
      double p[] = score0(chks, row, tmp, preds);
      for (int c = 0; c < preds.length; c++)
        chks[_output._names.length + c].set(row, p[c]);
    }
    if (j != null) j.update(1);
  }
}.doAll(adaptFrm);
Code example source: h2oai/h2o-3

  @Override public void map( Chunk chks[] ) {
    if (isCancelled() || j != null && j.stop_requested()) return;
    double tmp  [] = new double[_output._names.length];
    double preds[] = new double[_parms._k];
    for (int row = 0; row < chks[0]._len; row++) {
      double p[] = score0(chks, row, tmp, preds);
      for (int c = 0; c < preds.length; c++)
        chks[_output._names.length + c].set(row, p[c]);
    }
    if (j != null) j.update(1);
  }
}.doAll(adaptFrm);
Code example source: h2oai/h2o-3

@Override
public void map(Chunk[] cs) {
  int wordPos = (int) cs[0].start();
  int pos = _wordVecSize * wordPos;
  for (int i = 0; i < cs[0]._len; i++) {
    _words[wordPos++] = cs[0].atStr(new BufferedString(), i);
    for (int j = 1; j < cs.length; j++)
      _syn0[pos++] = (float) cs[j].atd(i);
  }
  _job.update(1); // one chunk of the word-vector frame processed
}
Code example source: h2oai/h2o-3

private void progressUpdate(Key<Job> job_key, boolean keep_running) {
  updateTiming(job_key);
  Job job = job_key.get();
  double progress = job.progress();
  float speed = (float) (model_info().get_processed_total() * 1000. / (total_training_time_ms - total_scoring_time_ms - total_setup_time_ms));
  assert (speed >= 0) : "negative speed computed! (total_run_time: " + total_training_time_ms + ", total_scoring_time: " + total_scoring_time_ms + ", total_setup_time: " + total_setup_time_ms + ")";
  String msg =
      "Iterations: " + String.format("%,d", iterations)
      + ". Epochs: " + String.format("%g", epoch_counter)
      + ". Speed: " + (speed > 10 ? String.format("%d", (int) speed) : String.format("%g", speed)) + " samples/sec."
      + (progress == 0 ? "" : " Estimated time left: " + PrettyPrint.msecs((long) (total_training_time_ms * (1. - progress) / progress), true));
  job.update(actual_train_samples_per_iteration, msg); // mark the amount of work done for the progress bar
  long now = System.currentTimeMillis();
  long sinceLastPrint = now - _timeLastPrintStart;
  if (!keep_running || sinceLastPrint > get_params()._score_interval * 1000) { // print after every score_interval, not considering duty cycle
    _timeLastPrintStart = now;
    if (!get_params()._quiet_mode) {
      Log.info(
          "Training time: " + PrettyPrint.msecs(total_training_time_ms, true) + " (scoring: " + PrettyPrint.msecs(total_scoring_time_ms, true) + "). "
          + "Processed " + String.format("%,d", model_info().get_processed_total()) + " samples (" + String.format("%.3f", epoch_counter) + " epochs).\n");
      Log.info(msg);
    }
  }
}
Code example source: h2oai/h2o-3

private void progressUpdate(Key<Job> job_key, boolean keep_running) {
  updateTiming(job_key);
  Job job = job_key.get();
  double progress = job.progress();
  int speed = (int) (model_info().get_processed_total() * 1000. / (total_training_time_ms - total_scoring_time_ms - total_setup_time_ms));
  assert (speed >= 0) : "negative speed computed! (total_run_time: " + total_training_time_ms + ", total_scoring_time: " + total_scoring_time_ms + ", total_setup_time: " + total_setup_time_ms + ")";
  String msg =
      "Iterations: " + String.format("%,d", iterations)
      + ". Epochs: " + String.format("%g", epoch_counter)
      + ". Speed: " + String.format("%,d", speed) + " samples/sec."
      + (progress == 0 ? "" : " Estimated time left: " + PrettyPrint.msecs((long) (total_training_time_ms * (1. - progress) / progress), true));
  job.update(actual_train_samples_per_iteration, msg); // mark the amount of work done for the progress bar
  long now = System.currentTimeMillis();
  long sinceLastPrint = now - _timeLastPrintStart;
  if (!keep_running || sinceLastPrint > get_params()._score_interval * 1000) { // print after every score_interval, not considering duty cycle
    _timeLastPrintStart = now;
    if (!get_params()._quiet_mode) {
      Log.info(
          "Training time: " + PrettyPrint.msecs(total_training_time_ms, true) + " (scoring: " + PrettyPrint.msecs(total_scoring_time_ms, true) + "). "
          + "Processed " + String.format("%,d", model_info().get_processed_total()) + " samples (" + String.format("%.3f", epoch_counter) + " epochs).\n");
      Log.info(msg);
    }
  }
}
Code example source: h2oai/h2o-3

@Override public void computeImpl() {
  ExampleModel model = null;
  try {
    init(true);
    // The model to be built
    model = new ExampleModel(_job._result, _parms, new ExampleModel.ExampleOutput(Example.this));
    model.delete_and_lock(_job);
    // ---
    // Run the main Example loop; stop after enough iterations
    for (; model._output._iterations < _parms._max_iterations; model._output._iterations++) {
      if (stop_requested()) break; // Stopped/cancelled
      double[] maxs = new Max().doAll(_parms.train())._maxs;
      // Fill in the model
      model._output._maxs = maxs;
      model.update(_job); // Update model in K/V store
      _job.update(1);     // One unit of work
      StringBuilder sb = new StringBuilder();
      sb.append("Example: iter: ").append(model._output._iterations);
      Log.info(sb);
    }
  } finally {
    if (model != null) model.unlock(_job);
  }
}
Code example source: h2oai/h2o-3

if (_parms._force_load_balance || _parms._reproducible) { // called before the parameters are sanitized, so force_load_balance might be user-disabled -> must check the reproducible flag as well
  int original_chunks = original_fr.anyVec().nChunks();
  _job.update(0, "Load balancing " + name.substring(name.length() - 5) + " data...");
  int chunks = desiredChunks(original_fr, local);
  if (!_parms._reproducible) {
    // ... (snippet truncated at the source)
Code example source: h2oai/h2o-3

protected void updateProgress(boolean canScore) {
  assert !_parms._lambda_search;
  _sc.addIterationScore(_state._iter, _state.likelihood(), _state.objective());
  _job.update(_workPerIteration, _state.toString());
  if (canScore && (_parms._score_each_iteration || timeSinceLastScoring() > _scoringInterval)) {
    _model.update(_state.expandBeta(_state.beta()), -1, -1, _state._iter);
    scoreAndUpdateModel();
  }
}
Code example source: h2oai/h2o-3

_job.update(1);
Code example source: h2oai/h2o-3

  @Override public void map( Chunk chks[] ) {
    if (isCancelled() || job != null && job.stop_requested()) throw new Job.JobCancelledException();
    double tmp[] = new double[len];
    final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
    for (int row = 0; row < chks[0]._len; row++) {
      for (int i = 0; i < len; i++)
        tmp[i] = chks[i].atd(row);
      ((Neurons.Input) neurons[0]).setInput(-1, tmp, mb); // FIXME: No weights yet
      DeepLearningTask.fpropMiniBatch(-1, neurons, model_info, null, false, null, null /*no offset*/, n);
      double[] out = neurons[layer + 1]._a[mb].raw(); // extract the layer-th hidden feature
      for (int c = 0; c < features; c++)
        chks[_output._names.length + c].set(row, out[c]);
    }
    if (job != null) job.update(1);
  }
}.doAll(adaptFrm);
Code example source: h2oai/h2o-3

_job.update(0, "Initializing model training");
Timer iterTimer = null;
CoxPHTask coxMR = null;
_job.update(1, "Running iteration 0");
for (int i = 0; i <= model._parms._max_iterations; ++i) {
  iterTimer = new Timer();
  // ... (lines elided at the source)
      _parms._ties).doAll(dinfo._adaptedFrame);
  Log.info("CoxPHTask: iter=" + i + ", time=" + aggregTimer.toString());
  _job.update(1);
  _job.update(1, "Iteration = " + i + "/" + model._parms._max_iterations + ", logLik = " + logLik);
  if (i != model._parms._max_iterations)
    Log.info("CoxPH Iteration: iter=" + i + ", " + iterTimer.toString());
Code example source: h2oai/h2o-3

for (Exemplar e : _exemplars) sum += e._cnt;
assert (sum <= chks[0].len());
((Job) _jobKey.get()).update(1, "Aggregating.");
Code example source: h2oai/h2o-3

void compute() {
  try {
    B builder = createBuilder();
    if (_hasMetalearnerParams) {
      builder._parms = _metalearner_parameters;
    }
    setCommonParams(builder._parms);
    setCrossValidationParams(builder._parms);
    setCustomParams(builder._parms);
    builder.init(false);
    Job<M> j = builder.trainModel();
    while (j.isRunning()) {
      try {
        _job.update(j._work, "training metalearner(" + _model._parms._metalearner_algorithm + ")");
        Thread.sleep(100);
      } catch (InterruptedException ignored) {
      }
    }
    Log.info("Finished training metalearner model (" + _model._parms._metalearner_algorithm + ").");
    _model._output._metalearner = builder.get();
    _model.doScoreOrCopyMetrics(_job);
    if (_parms._keep_levelone_frame) {
      _model._output._levelone_frame_id = _levelOneTrainingFrame; // keep the level-one training frame in the StackedEnsemble model object
    }
  } finally {
    cleanup();
    _model.update(_job);
    _model.unlock(_job);
  }
}
Code example source: h2oai/h2o-3

for (Exemplar e : _exemplars) sum += e._cnt;
assert (sum == localCounts + remoteCounts);
((Job) _jobKey.get()).update(1, "Aggregating.");
Code example source: h2oai/h2o-3

_job.update(1, "Forming small matrix B = Q'A for direct SVD");
SMulTask stsk = new SMulTask(dinfo, _parms._nv, _ncolExp);
_job.update(1, "Forming distributed orthonormal matrix U");
u = makeUVec(model, u_name, u, qfrm, new Matrix(stsk._atq), svdJ);
model._output._d = ArrayUtils.mult((Arrays.copyOfRange(ArrayUtils.sqrtArr(svdJ.getSingularValues()),
    // ... (expression truncated at the source)
_job.update(1, "Calculating SVD of small matrix locally");
Matrix atqJ = new Matrix(stsk._atq);
SingularValueDecomposition svdJ = atqJ.svd();
_job.update(1, "Forming distributed orthonormal matrix U");
Code example source: h2oai/h2o-3

if (_j != null) _j.update(1);
Code example source: h2oai/h2o-3

@Override
public void computeImpl() {
  NaiveBayesModel model = null;
  DataInfo dinfo = null;
  try {
    init(true); // Initialize parameters
    if (error_count() > 0) throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(NaiveBayes.this);
    dinfo = new DataInfo(_train, _valid, 1, false, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE, true, false, false, _weights != null, false, _fold != null);
    // The model to be built
    model = new NaiveBayesModel(dest(), _parms, new NaiveBayesOutput(NaiveBayes.this));
    model.delete_and_lock(_job);
    _job.update(1, "Begin distributed Naive Bayes calculation");
    NBTask tsk = new NBTask(_job._key, dinfo, _response.cardinality()).doAll(dinfo._adaptedFrame);
    if (computeStatsFillModel(model, dinfo, tsk))
      model.update(_job);
  } finally {
    if (model != null) model.unlock(_job);
    if (dinfo != null) dinfo.remove();
  }
}
Code example source: h2oai/h2o-3

if (previous == null) throw new IllegalArgumentException("Checkpoint not found.");
Log.info("Resuming from checkpoint.");
_job.update(0, "Resuming from checkpoint");