de.lmu.ifi.dbs.elki.logging.statistics.Duration.end()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(19.4k)|赞(0)|评价(0)|浏览(133)

本文整理了Java中de.lmu.ifi.dbs.elki.logging.statistics.Duration.end()方法的一些代码示例，展示了Duration.end()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台，是从一些精选项目中提取出来的代码，具有较强的参考意义，能在一定程度上帮助到你。Duration.end()方法的具体详情如下：
包路径:de.lmu.ifi.dbs.elki.logging.statistics.Duration
类名称:Duration
方法名:end

Duration.end介绍

[英]Finish the timer.
[中]完成计时器。

代码示例

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki

/**
 * Execute the given runner once for every k in the configured step range.
 *
 * @param prefix Prefix string identifying the method
 * @param startk First value of k
 * @param stepk Increment between successive values of k
 * @param maxk Largest value of k (inclusive)
 * @param runner Runner to invoke for each k
 */
private void runForEachK(String prefix, int startk, int stepk, int maxk, AlgRunner runner) {
 if(isDisabled(prefix)) {
  LOG.verbose("Skipping (disabled): " + prefix);
  return;
 }
 LOG.verbose("Running " + prefix);
 // Zero-pad k in generated names so they sort lexicographically.
 final int width = (int) Math.ceil(Math.log10(maxk + 1));
 final String pattern = "%s-%0" + width + "d";
 for(int k = startk; k <= maxk; k += stepk) {
  // Record the runtime of each individual run as a statistic.
  final String key = this.getClass().getCanonicalName() + "." + prefix + ".k" + k + ".runtime";
  Duration timer = LOG.newDuration(key).begin();
  runner.run(k, String.format(Locale.ROOT, pattern, prefix, k));
  LOG.statistics(timer.end());
 }
}

代码示例来源:origin: elki-project/elki

/**
 * Choose the initial means via the configured initializer, and log the
 * time spent doing so as a statistic.
 *
 * @param database Database
 * @param relation Relation of vectors to cluster
 * @return Initial means, one row per mean
 */
protected double[][] initialMeans(Database database, Relation<V> relation) {
 // Use getName(): Class.toString() would prepend "class ", producing the key
 // "class pkg.Name.time". getName() matches the key style of the other
 // timing statistics in this file (e.g. ".initialization-time").
 Duration inittime = getLogger().newDuration(initializer.getClass().getName() + ".time").begin();
 double[][] means = initializer.chooseInitialMeans(database, relation, k, getDistanceFunction());
 getLogger().statistics(inittime.end());
 return means;
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki-clustering

/**
 * Choose the initial means via the configured initializer, and log the
 * time spent doing so as a statistic.
 *
 * @param database Database
 * @param relation Relation of vectors to cluster
 * @return Initial means, one row per mean
 */
protected double[][] initialMeans(Database database, Relation<V> relation) {
 // Use getName(): Class.toString() would prepend "class ", producing the key
 // "class pkg.Name.time". getName() matches the key style of the other
 // timing statistics in this file (e.g. ".initialization-time").
 Duration inittime = getLogger().newDuration(initializer.getClass().getName() + ".time").begin();
 double[][] means = initializer.chooseInitialMeans(database, relation, k, getDistanceFunction());
 getLogger().statistics(inittime.end());
 return means;
}

代码示例来源:origin: elki-project/elki

/**
 * Pick the initial medoid set using the configured initializer.
 *
 * @param distQ Distance query
 * @param ids IDs to choose from
 * @return Initial medoids (exactly k of them)
 */
protected ArrayModifiableDBIDs initialMedoids(DistanceQuery<V> distQ, DBIDs ids) {
 // Record which initializer was used, when statistics are enabled.
 if(getLogger().isStatistics()) {
  getLogger().statistics(new StringStatistic(getClass().getName() + ".initialization", initializer.toString()));
 }
 // Time the initialization phase separately from the optimization phase.
 Duration timer = getLogger().newDuration(getClass().getName() + ".initialization-time").begin();
 ArrayModifiableDBIDs medoids = DBIDUtil.newArray(initializer.chooseInitialMedoids(k, ids, distQ));
 getLogger().statistics(timer.end());
 // Sanity check: the initializer must yield exactly k medoids.
 if(k != medoids.size()) {
  throw new AbortException("Initializer " + initializer.toString() + " did not return " + k + " means, but " + medoids.size());
 }
 return medoids;
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki-clustering

/**
 * Pick the initial medoid set using the configured initializer.
 *
 * @param distQ Distance query
 * @param ids IDs to choose from
 * @return Initial medoids (exactly k of them)
 */
protected ArrayModifiableDBIDs initialMedoids(DistanceQuery<V> distQ, DBIDs ids) {
 // Record which initializer was used, when statistics are enabled.
 if(getLogger().isStatistics()) {
  getLogger().statistics(new StringStatistic(getClass().getName() + ".initialization", initializer.toString()));
 }
 // Time the initialization phase separately from the optimization phase.
 Duration timer = getLogger().newDuration(getClass().getName() + ".initialization-time").begin();
 ArrayModifiableDBIDs medoids = DBIDUtil.newArray(initializer.chooseInitialMedoids(k, ids, distQ));
 getLogger().statistics(timer.end());
 // Sanity check: the initializer must yield exactly k medoids.
 if(k != medoids.size()) {
  throw new AbortException("Initializer " + initializer.toString() + " did not return " + k + " means, but " + medoids.size());
 }
 return medoids;
}

代码示例来源:origin: elki-project/elki

/**
 * Iterate over the configured k range, invoking the runner for every k
 * that lies within [mink, maxk].
 *
 * @param prefix Prefix string identifying the method
 * @param mink Minimum value of k supported by this method
 * @param maxk Maximum value of k supported by this method
 * @param runner Runner producing an outlier result for a given k
 * @param out Consumer receiving (name, result) pairs
 */
private void runForEachK(String prefix, int mink, int maxk, IntFunction<OutlierResult> runner, BiConsumer<String, OutlierResult> out) {
 if(isDisabled(prefix)) {
  LOG.verbose("Skipping (disabled): " + prefix);
  return;
 }
 LOG.verbose("Running " + prefix);
 // Zero-pad k in generated names so they sort lexicographically.
 final int width = (int) FastMath.ceil(FastMath.log10(krange.getMax() + 1));
 final String pattern = "%s-%0" + width + "d";
 krange.forEach(k -> {
  if(k < mink || k > maxk) {
   return; // k not supported by this method
  }
  Duration timer = LOG.newDuration(this.getClass().getCanonicalName() + "." + prefix + ".k" + k + ".runtime").begin();
  OutlierResult result = runner.apply(k);
  LOG.statistics(timer.end());
  if(result != null) {
   out.accept(String.format(Locale.ROOT, pattern, prefix, k), result);
   // Detach the result from the hierarchy before the next run.
   result.getHierarchy().removeSubtree(result);
  }
 });
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki

/**
 * Run the Eclat algorithm
 *
 * @param db Database to process
 * @param relation Bit vector relation
 * @return Frequent patterns found
 */
public FrequentItemsetsResult run(Database db, final Relation<BitVector> relation) {
 // TODO: implement with resizable arrays, to not need dim.
 final int dim = RelationUtil.dimensionality(relation);
 final VectorFieldTypeInformation<BitVector> meta = RelationUtil.assumeVectorField(relation);
 // Translate the relative minimum support into an absolute count.
 final int minsupp = getMinimumSupport(relation.size());
 LOG.verbose("Build 1-dimensional transaction lists.");
 // Phase 1: transpose into per-item transaction lists (timed).
 Duration transposition = LOG.newDuration(STAT + "eclat.transposition.time").begin();
 DBIDs[] idx = buildIndex(relation, dim, minsupp);
 LOG.statistics(transposition.end());
 // Phase 2: extract frequent itemsets from the index (timed).
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress("Building frequent itemsets", idx.length, LOG) : null;
 Duration extraction = LOG.newDuration(STAT + "eclat.extraction.time").begin();
 final List<Itemset> solution = new ArrayList<>();
 for(int i = 0; i < idx.length; i++) {
  LOG.incrementProcessed(progress);
  extractItemsets(idx, i, minsupp, solution);
 }
 LOG.ensureCompleted(progress);
 Collections.sort(solution);
 LOG.statistics(extraction.end());
 LOG.statistics(new LongStatistic(STAT + "frequent-itemsets", solution.size()));
 return new FrequentItemsetsResult("Eclat", "eclat", solution, meta);
}

代码示例来源:origin: elki-project/elki

/**
 * Run the Eclat algorithm
 *
 * @param db Database to process
 * @param relation Bit vector relation
 * @return Frequent patterns found
 */
public FrequentItemsetsResult run(Database db, final Relation<BitVector> relation) {
 // TODO: implement with resizable arrays, to not need dim.
 final int dim = RelationUtil.dimensionality(relation);
 final VectorFieldTypeInformation<BitVector> meta = RelationUtil.assumeVectorField(relation);
 // Translate the relative minimum support into an absolute count.
 final int minsupp = getMinimumSupport(relation.size());
 LOG.verbose("Build 1-dimensional transaction lists.");
 // Phase 1: transpose into per-item transaction lists (timed).
 Duration transposition = LOG.newDuration(STAT + "eclat.transposition.time").begin();
 DBIDs[] idx = buildIndex(relation, dim, minsupp);
 LOG.statistics(transposition.end());
 // Phase 2: extract frequent itemsets from the index (timed).
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress("Building frequent itemsets", idx.length, LOG) : null;
 Duration extraction = LOG.newDuration(STAT + "eclat.extraction.time").begin();
 final List<Itemset> solution = new ArrayList<>();
 for(int i = 0; i < idx.length; i++) {
  LOG.incrementProcessed(progress);
  extractItemsets(idx, i, minsupp, solution);
 }
 LOG.ensureCompleted(progress);
 Collections.sort(solution);
 LOG.statistics(extraction.end());
 LOG.statistics(new LongStatistic(STAT + "frequent-itemsets", solution.size()));
 return new FrequentItemsetsResult("Eclat", "eclat", solution, meta, relation.size());
}

代码示例来源:origin: elki-project/elki

/**
 * Perform the preprocessing step: compute a local model for every object.
 *
 * @param modelcls Class of models
 * @param relation Data relation
 * @param query Range query used to fetch neighborhoods
 * @return Precomputed models, one per object
 */
public DataStore<M> preprocess(Class<? super M> modelcls, Relation<O> relation, RangeQuery<O> query) {
 WritableDataStore<M> models = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, modelcls);
 // Time the entire preprocessing pass as one statistic.
 Duration timer = getLogger().newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = getLogger().isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), getLogger()) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  // Build a local model from each object's epsilon-neighborhood.
  DoubleDBIDList neighbors = query.getRangeForDBID(it, epsilon);
  models.put(it, computeLocalModel(it, neighbors, relation));
  getLogger().incrementProcessed(progress);
 }
 getLogger().ensureCompleted(progress);
 getLogger().statistics(timer.end());
 return models;
}

代码示例来源:origin: elki-project/elki

/**
 * Full instantiation method: precompute a COPAC model for every object.
 *
 * @param database Database
 * @param relation Vector relation
 * @return Instance holding the precomputed models
 */
public COPACNeighborPredicate.Instance instantiate(Database database, Relation<V> relation) {
 DistanceQuery<V> distq = database.getDistanceQuery(relation, EuclideanDistanceFunction.STATIC);
 KNNQuery<V> knns = database.getKNNQuery(distq, settings.k);
 WritableDataStore<COPACModel> models = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, COPACModel.class);
 // Time the entire preprocessing pass as one statistic.
 Duration timer = LOG.newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), LOG) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  // Derive the local model from each object's k nearest neighbors.
  DoubleDBIDList knn = knns.getKNNForDBID(it, settings.k);
  models.put(it, computeLocalModel(it, knn, relation));
  LOG.incrementProcessed(progress);
 }
 LOG.ensureCompleted(progress);
 LOG.statistics(timer.end());
 return new Instance(relation.getDBIDs(), models);
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki-clustering

/**
 * Full instantiation method: precompute a COPAC model for every object.
 *
 * @param database Database
 * @param relation Vector relation
 * @return Instance holding the precomputed models
 */
public COPACNeighborPredicate.Instance instantiate(Database database, Relation<V> relation) {
 DistanceQuery<V> distq = database.getDistanceQuery(relation, EuclideanDistanceFunction.STATIC);
 KNNQuery<V> knns = database.getKNNQuery(distq, settings.k);
 WritableDataStore<COPACModel> models = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, COPACModel.class);
 // Time the entire preprocessing pass as one statistic.
 Duration timer = LOG.newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), LOG) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  // Derive the local model from each object's k nearest neighbors.
  DoubleDBIDList knn = knns.getKNNForDBID(it, settings.k);
  models.put(it, computeLocalModel(it, knn, relation));
  LOG.incrementProcessed(progress);
 }
 LOG.ensureCompleted(progress);
 LOG.statistics(timer.end());
 return new Instance(relation.getDBIDs(), models);
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki

/**
 * Full instantiation interface: precompute a filtered PCA result for
 * every object from its k-nearest-neighbor neighborhood.
 *
 * @param database Database
 * @param relation Relation
 * @return Instance holding the precomputed PCA results
 */
public Instance instantiate(Database database, Relation<V> relation) {
 DistanceQuery<V> distq = database.getDistanceQuery(relation, EuclideanDistanceFunction.STATIC);
 KNNQuery<V> knns = database.getKNNQuery(distq, settings.k);
 WritableDataStore<PCAFilteredResult> results = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, PCAFilteredResult.class);
 // Time the entire preprocessing pass as one statistic.
 Duration timer = LOG.newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), LOG) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  // Run PCA on each object's k nearest neighbors.
  DoubleDBIDList knn = knns.getKNNForDBID(it, settings.k);
  results.put(it, settings.pca.processQueryResult(knn, relation));
  LOG.incrementProcessed(progress);
 }
 LOG.ensureCompleted(progress);
 LOG.statistics(timer.end());
 return new Instance(relation.getDBIDs(), results, relation);
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki

/**
 * Full instantiation method: precompute a COPAC model for every object.
 *
 * @param database Database
 * @param relation Vector relation
 * @return Instance holding the precomputed models
 */
public COPACNeighborPredicate.Instance instantiate(Database database, Relation<V> relation) {
 DistanceQuery<V> distq = database.getDistanceQuery(relation, EuclideanDistanceFunction.STATIC);
 KNNQuery<V> knns = database.getKNNQuery(distq, settings.k);
 WritableDataStore<COPACModel> models = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, COPACModel.class);
 // Time the entire preprocessing pass as one statistic.
 Duration timer = LOG.newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), LOG) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  // Derive the local model from each object's k nearest neighbors.
  DoubleDBIDList knn = knns.getKNNForDBID(it, settings.k);
  models.put(it, computeLocalModel(it, knn, relation));
  LOG.incrementProcessed(progress);
 }
 LOG.ensureCompleted(progress);
 LOG.statistics(timer.end());
 return new Instance(relation.getDBIDs(), models);
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki

/**
 * Perform the preprocessing step: compute a local model for every object.
 *
 * @param modelcls Class of models
 * @param relation Data relation
 * @param query Range query used to fetch neighborhoods
 * @return Precomputed models, one per object
 */
public DataStore<M> preprocess(Class<? super M> modelcls, Relation<O> relation, RangeQuery<O> query) {
 WritableDataStore<M> models = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, modelcls);
 // Time the entire preprocessing pass as one statistic.
 Duration timer = getLogger().newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = getLogger().isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), getLogger()) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  // Build a local model from each object's epsilon-neighborhood.
  DoubleDBIDList neighbors = query.getRangeForDBID(it, epsilon);
  models.put(it, computeLocalModel(it, neighbors, relation));
  getLogger().incrementProcessed(progress);
 }
 getLogger().ensureCompleted(progress);
 getLogger().statistics(timer.end());
 return models;
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki-clustering

/**
 * Perform the preprocessing step: compute a local model for every object.
 *
 * @param modelcls Class of models
 * @param relation Data relation
 * @param query Range query used to fetch neighborhoods
 * @return Precomputed models, one per object
 */
public DataStore<M> preprocess(Class<? super M> modelcls, Relation<O> relation, RangeQuery<O> query) {
 WritableDataStore<M> models = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, modelcls);
 // Time the entire preprocessing pass as one statistic.
 Duration timer = getLogger().newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = getLogger().isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), getLogger()) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  // Build a local model from each object's epsilon-neighborhood.
  DoubleDBIDList neighbors = query.getRangeForDBID(it, epsilon);
  models.put(it, computeLocalModel(it, neighbors, relation));
  getLogger().incrementProcessed(progress);
 }
 getLogger().ensureCompleted(progress);
 getLogger().statistics(timer.end());
 return models;
}

代码示例来源:origin: elki-project/elki

/**
 * Full instantiation interface: run PCA on each object's neighborhood and
 * keep the eigenpairs accepted by the configured filter.
 *
 * @param database Database
 * @param relation Relation
 * @return Instance holding the precomputed filtered PCA results
 */
public Instance instantiate(Database database, Relation<V> relation) {
 DistanceQuery<V> distq = database.getDistanceQuery(relation, EuclideanDistanceFunction.STATIC);
 KNNQuery<V> knns = database.getKNNQuery(distq, settings.k);
 WritableDataStore<PCAFilteredResult> results = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, PCAFilteredResult.class);
 // Hoist the PCA runner and eigenpair filter out of the loop.
 PCARunner pca = settings.pca;
 EigenPairFilter filter = settings.filter;
 // Time the entire preprocessing pass as one statistic.
 Duration timer = LOG.newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), LOG) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  DoubleDBIDList knn = knns.getKNNForDBID(it, settings.k);
  // PCA on the neighborhood, then filter to the retained eigenpairs.
  PCAResult epairs = pca.processQueryResult(knn, relation);
  results.put(it, new PCAFilteredResult(epairs.getEigenPairs(), filter.filter(epairs.getEigenvalues()), 1., 0.));
  LOG.incrementProcessed(progress);
 }
 LOG.ensureCompleted(progress);
 LOG.statistics(timer.end());
 return new Instance(relation.getDBIDs(), results, relation);
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki-clustering

/**
 * Full instantiation interface: run PCA on each object's neighborhood and
 * keep the eigenpairs accepted by the configured filter.
 *
 * @param database Database
 * @param relation Relation
 * @return Instance holding the precomputed filtered PCA results
 */
public Instance instantiate(Database database, Relation<V> relation) {
 DistanceQuery<V> distq = database.getDistanceQuery(relation, EuclideanDistanceFunction.STATIC);
 KNNQuery<V> knns = database.getKNNQuery(distq, settings.k);
 WritableDataStore<PCAFilteredResult> results = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, PCAFilteredResult.class);
 // Hoist the PCA runner and eigenpair filter out of the loop.
 PCARunner pca = settings.pca;
 EigenPairFilter filter = settings.filter;
 // Time the entire preprocessing pass as one statistic.
 Duration timer = LOG.newDuration(this.getClass().getName() + ".preprocessing-time").begin();
 FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), LOG) : null;
 for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
  DoubleDBIDList knn = knns.getKNNForDBID(it, settings.k);
  // PCA on the neighborhood, then filter to the retained eigenpairs.
  PCAResult epairs = pca.processQueryResult(knn, relation);
  results.put(it, new PCAFilteredResult(epairs.getEigenPairs(), filter.filter(epairs.getEigenvalues()), 1., 0.));
  LOG.incrementProcessed(progress);
 }
 LOG.ensureCompleted(progress);
 LOG.statistics(timer.end());
 return new Instance(relation.getDBIDs(), results, relation);
}

代码示例来源:origin: elki-project/elki

/**
 * Run k-medoids (PAM): choose initial medoids, optimize the assignment,
 * and wrap the partitions into a clustering result.
 *
 * @param database Database
 * @param relation relation to use
 * @return clustering result, one cluster per medoid
 */
public Clustering<MedoidModel> run(Database database, Relation<V> relation) {
 // Implementation limit on the number of clusters.
 if(k > 0x7FFF) {
  throw new NotImplementedException("PAM supports at most " + 0x7FFF + " clusters.");
 }
 DistanceQuery<V> distQ = DatabaseUtil.precomputedDistanceQuery(database, relation, getDistanceFunction(), LOG);
 DBIDs ids = relation.getDBIDs();
 ArrayModifiableDBIDs medoids = initialMedoids(distQ, ids);
 // Cluster assignment store, initialized to "unassigned" (-1).
 WritableIntegerDataStore assignment = DataStoreUtil.makeIntegerStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, -1);
 // Time only the iterative optimization, not the initialization above.
 Duration timer = getLogger().newDuration(getClass().getName() + ".optimization-time").begin();
 run(distQ, ids, medoids, assignment);
 getLogger().statistics(timer.end());
 ArrayModifiableDBIDs[] clusters = ClusteringAlgorithmUtil.partitionsFromIntegerLabels(ids, assignment, k);
 // Wrap the partitions into the result, one top-level cluster per medoid.
 Clustering<MedoidModel> result = new Clustering<>("PAM Clustering", "pam-clustering");
 for(DBIDArrayIter m = medoids.iter(); m.valid(); m.advance()) {
  result.addToplevelCluster(new Cluster<>(clusters[m.getOffset()], new MedoidModel(DBIDUtil.deref(m))));
 }
 return result;
}

代码示例来源:origin: de.lmu.ifi.dbs.elki/elki-clustering

/**
 * Run k-medoids (PAM): choose initial medoids, optimize the assignment,
 * and wrap the partitions into a clustering result.
 *
 * @param database Database
 * @param relation relation to use
 * @return clustering result, one cluster per medoid
 */
public Clustering<MedoidModel> run(Database database, Relation<V> relation) {
 // Implementation limit on the number of clusters.
 if(k > 0x7FFF) {
  throw new NotImplementedException("PAM supports at most " + 0x7FFF + " clusters.");
 }
 DistanceQuery<V> distQ = DatabaseUtil.precomputedDistanceQuery(database, relation, getDistanceFunction(), LOG);
 DBIDs ids = relation.getDBIDs();
 ArrayModifiableDBIDs medoids = initialMedoids(distQ, ids);
 // Cluster assignment store, initialized to "unassigned" (-1).
 WritableIntegerDataStore assignment = DataStoreUtil.makeIntegerStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, -1);
 // Time only the iterative optimization, not the initialization above.
 Duration timer = getLogger().newDuration(getClass().getName() + ".optimization-time").begin();
 run(distQ, ids, medoids, assignment);
 getLogger().statistics(timer.end());
 ArrayModifiableDBIDs[] clusters = ClusteringAlgorithmUtil.partitionsFromIntegerLabels(ids, assignment, k);
 // Wrap the partitions into the result, one top-level cluster per medoid.
 Clustering<MedoidModel> result = new Clustering<>("PAM Clustering", "pam-clustering");
 for(DBIDArrayIter m = medoids.iter(); m.valid(); m.advance()) {
  result.addToplevelCluster(new Cluster<>(clusters[m.getOffset()], new MedoidModel(DBIDUtil.deref(m))));
 }
 return result;
}

代码示例来源:origin: elki-project/elki

LOG.statistics(timer.end());

相关文章