Usage of the org.influxdb.InfluxDB class, with code examples

This article collects a number of Java code examples for org.influxdb.InfluxDB and shows how the class is used in practice. The examples come from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they serve as a useful reference. Details of the InfluxDB class:
Package path: org.influxdb.InfluxDB
Class name: InfluxDB

About InfluxDB

Interface with all available methods to access an InfluxDB database. A full list of the currently available interfaces is implemented in: https://github.com/influxdb/influxdb/blob/master/src/api/http/api.go
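
Before going through the project excerpts below, here is a minimal, self-contained usage sketch of the client (connect, write, query, shut down). It assumes a recent influxdb-java 2.x client; the URL http://localhost:8086, the admin/admin credentials, the example_db database, and the InfluxDBQuickStart class name are placeholders introduced for illustration, not taken from any of the projects quoted here.

import java.util.concurrent.TimeUnit;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Point;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;

public class InfluxDBQuickStart {

    public static void main(String[] args) {
        // Connect to the server; URL and credentials are placeholders.
        InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "admin", "admin");

        // Make sure the target database exists and use it as the default for writes.
        influxDB.createDatabase("example_db");
        influxDB.setDatabase("example_db");

        // Buffer up to 1000 points and flush at least every 100 ms.
        influxDB.enableBatch(1000, 100, TimeUnit.MILLISECONDS);

        // Write a single point into the "cpu" measurement.
        Point point = Point.measurement("cpu")
                .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                .tag("host", "server01")
                .addField("idle", 90L)
                .build();
        influxDB.write(point);

        // Query the value back (buffered points are flushed by disableBatch() below).
        QueryResult result = influxDB.query(new Query("SELECT idle FROM cpu", "example_db"));
        System.out.println(result);

        // Flush any buffered points and release the underlying HTTP resources.
        influxDB.disableBatch();
        influxDB.close();
    }
}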

Code examples

Code example source: apache/nifi

protected void writeToInfluxDB(ProcessContext context, String consistencyLevel, String database, String retentionPolicy, String records) {
  getInfluxDB(context).write(database, retentionPolicy, InfluxDB.ConsistencyLevel.valueOf(consistencyLevel), records);
}

Code example source: jmxtrans/jmxtrans

public void doWrite(Server server, Query query, Iterable<Result> results) throws Exception {
  if (createDatabase) influxDB.createDatabase(database);
  BatchPoints.Builder batchPointsBuilder = BatchPoints.database(database).retentionPolicy(retentionPolicy)
      .tag(TAG_HOSTNAME, server.getSource());
  // ... points derived from the query results are added to batchPointsBuilder here (elided in the excerpt) ...
  BatchPoints batchPoints = batchPointsBuilder.build();
  influxDB.write(batchPoints);
}

Code example source: SeldonIO/seldon-server

influxDB.enableBatch(50, 5, TimeUnit.SECONDS);

Code example source: testcontainers/testcontainers-java

@Test
public void queryForWriteAndRead() {
  InfluxDB influxDB = influxDBContainer.getNewInfluxDB();

  Point point = Point.measurement("cpu")
    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
    .addField("idle", 90L)
    .addField("user", 9L)
    .addField("system", 1L)
    .build();
  influxDB.write(point);

  Query query = new Query("SELECT idle FROM cpu", DATABASE);
  QueryResult actual = influxDB.query(query);

  assertThat(actual, notNullValue());
  assertThat(actual.getError(), nullValue());
  assertThat(actual.getResults(), notNullValue());
  assertThat(actual.getResults().size(), is(1));
}

Code example source: inspectIT/inspectIT

@Test
public void enableInfluxDatabaseExists() {
  influxDao.active = true;
  when(influxDb.describeDatabases()).thenReturn(Arrays.asList(influxDao.database));
  influxDao.propertiesUpdated();
  assertThat(influxDao.isConnected(), is(true));
  assertThat(influxDao.getServiceStatus(), is(ExternalServiceStatus.CONNECTED));
  verify(executor, times(1)).submit(any(Runnable.class));
  verify(influxDb).ping();
  verify(influxDb).write(any(String.class), any(String.class), any(Point.class));
  verify(influxDb).query(any(Query.class));
  verify(influxDb).isBatchEnabled();
  verify(influxDb).enableBatch(InfluxDBDao.BATCH_BUFFER_SIZE, InfluxDBDao.BATCH_FLUSH_TIMER, TimeUnit.SECONDS);
  verify(availabilityChecker).deactivate();
  verify(availabilityChecker).setInflux(influxDb);
  verify(availabilityChecker).activate();
  verify(clientFactory).createClient();
  verifyNoMoreInteractions(influxDb, availabilityChecker, executor, clientFactory);
  verifyZeroInteractions(future);
}

Code example source: inspectIT/inspectIT

@Test
public void writeTestFailsDatabaseCreationSucceeds() {
  influxDao.active = true;
  Mockito.doThrow(new RuntimeException()).when(influxDb).write(any(String.class), any(String.class), any(Point.class));
  influxDao.propertiesUpdated();
  assertThat(influxDao.isConnected(), is(true));
  verify(executor, times(1)).submit(any(Runnable.class));
  verify(influxDb).write(any(String.class), any(String.class), any(Point.class));
  verify(clientFactory).createClient();
  verify(influxDb).describeDatabases();
  verify(influxDb).createDatabase(any(String.class));
  verify(influxDb, times(1)).isBatchEnabled();
  verify(influxDb).enableBatch(InfluxDBDao.BATCH_BUFFER_SIZE, InfluxDBDao.BATCH_FLUSH_TIMER, TimeUnit.SECONDS);
  verify(influxDb).ping();
  verify(availabilityChecker, times(1)).deactivate();
  verify(availabilityChecker).setInflux(influxDb);
  verify(availabilityChecker).activate();
  verifyNoMoreInteractions(future, influxDb, executor, clientFactory);
}

Code example source: dataArtisans/oscon

@Override
public void open(Configuration parameters) throws Exception {
 super.open(parameters);
 influxDB = InfluxDBFactory.connect("http://localhost:8086", "admin", "admin");
 influxDB.createDatabase(dataBaseName);
 influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS);
}

Code example source: org.apache.camel/camel-influxdb

private void doInsert(Exchange exchange, String dataBaseName, String retentionPolicy) throws InvalidPayloadException {
  if (!endpoint.isBatch()) {
    Point p = exchange.getIn().getMandatoryBody(Point.class);
    try {
      LOG.debug("Writing point {}", p.lineProtocol());
      
      if (!connection.databaseExists(dataBaseName)) {
        LOG.debug("Database {} doesn't exist. Creating it...", dataBaseName);
        connection.createDatabase(dataBaseName);
      }
      connection.write(dataBaseName, retentionPolicy, p);
    } catch (Exception ex) {
      exchange.setException(new CamelInfluxDbException(ex));
    }
  } else {
    BatchPoints batchPoints = exchange.getIn().getMandatoryBody(BatchPoints.class);
    try {
      LOG.debug("Writing BatchPoints {}", batchPoints.lineProtocol());
      connection.write(batchPoints);
    } catch (Exception ex) {
      exchange.setException(new CamelInfluxDbException(ex));
    }
  }
}

Code example source: inspectIT/inspectIT

@Test
public void writeTestFailsDatabaseCreationFails() {
  influxDao.active = true;
  Mockito.doThrow(new RuntimeException()).when(influxDb).write(any(String.class), any(String.class), any(Point.class));
  Mockito.doThrow(new RuntimeException()).when(influxDb).describeDatabases();
  Mockito.doThrow(new RuntimeException()).when(influxDb).createDatabase(any(String.class));
  influxDao.propertiesUpdated();
  assertThat(influxDao.isConnected(), is(false));
  verify(executor, times(1)).submit(any(Runnable.class));
  verify(clientFactory).createClient();
  verify(influxDb).write(any(String.class), any(String.class), any(Point.class));
  verify(influxDb).describeDatabases();
  verify(influxDb).createDatabase(any(String.class));
  verifyNoMoreInteractions(future, influxDb, executor, clientFactory);
}

Code example source: amient/kafka-metrics

public void tryPublish(MeasurementV1 m) {
  if (influxDB == null) {
    influxDB = InfluxDBFactory.connect(address, username, password);
    influxDB.enableBatch(1000, 100, TimeUnit.MILLISECONDS);
  }
  Point.Builder builder = Point.measurement(m.getName().toString()).time(m.getTimestamp(), TimeUnit.MILLISECONDS);
  for (java.util.Map.Entry<String, String> tag : m.getTags().entrySet()) {
    builder.tag(tag.getKey().toString(), tag.getValue().toString());
  }
  for (java.util.Map.Entry<String, Double> field : m.getFields().entrySet()) {
    builder.field(field.getKey().toString(), field.getValue());
  }
  influxDB.write(dbName, retention, builder.build());
}

Code example source: apache/bahir-flink

/**
 * Initializes the connection to InfluxDB by either cluster or sentinels or single server.
 */
@Override
public void open(Configuration parameters) throws Exception {
  super.open(parameters);
  influxDBClient = InfluxDBFactory.connect(influxDBConfig.getUrl(), influxDBConfig.getUsername(), influxDBConfig.getPassword());
  if (!influxDBClient.databaseExists(influxDBConfig.getDatabase())) {
    if(influxDBConfig.isCreateDatabase()) {
      influxDBClient.createDatabase(influxDBConfig.getDatabase());
    }
    else {
      throw new RuntimeException("This " + influxDBConfig.getDatabase() + " database does not exist!");
    }
  }
  influxDBClient.setDatabase(influxDBConfig.getDatabase());
  if (influxDBConfig.getBatchActions() > 0) {
    influxDBClient.enableBatch(influxDBConfig.getBatchActions(), influxDBConfig.getFlushDuration(), influxDBConfig.getFlushDurationTimeUnit());
  }
  if (influxDBConfig.isEnableGzip()) {
    influxDBClient.enableGzip();
  }
}

Code example source: Scrin/RuuviCollector

  int batchTime
) {
  influxDB = InfluxDBFactory.connect(url, user, password).setDatabase(database).setRetentionPolicy(retentionPolicy);
  if (gzip) {
    influxDB.enableGzip();
  } else {
    influxDB.disableGzip();
  }
  // The "batch" flag is assumed; the excerpt dropped the lines separating the two if/else blocks.
  if (batch) {
    influxDB.enableBatch(batchSize, batchTime, TimeUnit.MILLISECONDS);
  } else {
    influxDB.disableBatch();
  }
}

Code example source: inspectIT/inspectIT

@Test
public void reconnected() {
  influxDao.active = true;
  when(influxDb.isBatchEnabled()).thenReturn(false);
  influxDao.onReconnection();
  assertThat(influxDao.isConnected(), is(true));
  assertThat(influxDao.getServiceStatus(), is(ExternalServiceStatus.CONNECTED));
  verify(influxDb).isBatchEnabled();
  verify(influxDb).enableBatch(InfluxDBDao.BATCH_BUFFER_SIZE, InfluxDBDao.BATCH_FLUSH_TIMER, TimeUnit.SECONDS);
  verify(influxDb).describeDatabases();
  verify(influxDb).createDatabase(influxDao.database);
  verifyNoMoreInteractions(influxDb);
  verifyZeroInteractions(future, executor, availabilityChecker, clientFactory);
}

Code example source: org.mycontroller.standalone/mycontroller-core

@Override
public void connect() {
  if (_config.getUsername() != null && _config.getUsername().trim().length() > 0) {
    _client = InfluxDBFactory.connect(_config.getUrl(), _config.getUsername(), _config.getPassword());
  } else {
    _client = InfluxDBFactory.connect(_config.getUrl());
  }
  _client.setDatabase(_config.getDatabase());
  _client.enableBatch(BatchOptions.DEFAULTS.actions(FLUSH_POINTS).flushDuration(FLUSH_DURATION));
  _logger.debug("External server:{}, Influxdb client BatchSettings[flush, points:{}, duration:{} ms]",
      _config.getName(), FLUSH_POINTS, FLUSH_DURATION);
}

Code example source: org.apereo.cas/cas-server-support-influxdb-core

public InfluxDbConnectionFactory(final String url, final String uid,
                 final String psw, final String dbName,
                 final boolean dropDatabase) {
  if (StringUtils.isBlank(dbName) || StringUtils.isBlank(url)) {
    throw new IllegalArgumentException("Database name/url cannot be blank and must be specified");
  }
  val builder = new OkHttpClient.Builder();
  this.influxDb = InfluxDBFactory.connect(url, uid, psw, builder);
  this.influxDb.enableGzip();
  if (dropDatabase) {
    this.influxDb.deleteDatabase(dbName);
  }
  if (!this.influxDb.databaseExists(dbName)) {
    this.influxDb.createDatabase(dbName);
  }
  this.influxDb.setLogLevel(InfluxDB.LogLevel.NONE);
  if (LOGGER.isDebugEnabled()) {
    this.influxDb.setLogLevel(InfluxDB.LogLevel.FULL);
  } else if (LOGGER.isInfoEnabled()) {
    this.influxDb.setLogLevel(InfluxDB.LogLevel.BASIC);
  } else if (LOGGER.isWarnEnabled()) {
    this.influxDb.setLogLevel(InfluxDB.LogLevel.HEADERS);
  } else if (LOGGER.isErrorEnabled()) {
    this.influxDb.setLogLevel(InfluxDB.LogLevel.NONE);
  }
}

Code example source: NovatecConsulting/JMeter-InfluxDB-Writer

@Override
public void teardownTest(BackendListenerContext context) throws Exception {
  LOGGER.info("Shutting down influxDB scheduler...");
  scheduler.shutdown();
  addVirtualUsersMetrics(0,0,0,0,JMeterContextService.getThreadCounts().finishedThreads);
  influxDB.write(
      influxDBConfig.getInfluxDatabase(),
      influxDBConfig.getInfluxRetentionPolicy(),
      Point.measurement(TestStartEndMeasurement.MEASUREMENT_NAME).time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
          .tag(TestStartEndMeasurement.Tags.TYPE, TestStartEndMeasurement.Values.FINISHED)
          .tag(TestStartEndMeasurement.Tags.NODE_NAME, nodeName)
          .tag(TestStartEndMeasurement.Tags.RUN_ID, runId)
          .tag(TestStartEndMeasurement.Tags.TEST_NAME, testName)
          .addField(TestStartEndMeasurement.Fields.PLACEHOLDER,"1")
          .build());
  influxDB.disableBatch();
  try {
    scheduler.awaitTermination(30, TimeUnit.SECONDS);
    LOGGER.info("influxDB scheduler terminated!");
  } catch (InterruptedException e) {
    LOGGER.error("Error waiting for end of scheduler");
  }
  samplersToFilter.clear();
  super.teardownTest(context);
}

Code example source: sitewhere/sitewhere

@Override
public void initialize(ILifecycleProgressMonitor monitor) throws SiteWhereException {
  super.start(monitor);
  String connectionUrl = "http://" + getHostname().getValue() + ":" + getConfiguration().getPort();
  this.influx = InfluxDBFactory.connect(connectionUrl, getConfiguration().getUsername(),
      getConfiguration().getPassword());
  influx.createDatabase(getDatabase().getValue());
  if (getConfiguration().isEnableBatch()) {
    influx.enableBatch(getConfiguration().getBatchChunkSize(), getConfiguration().getBatchIntervalMs(),
        TimeUnit.MILLISECONDS);
  }
  influx.setLogLevel(convertLogLevel(getConfiguration().getLogLevel()));
}

Code example source: inspectIT/inspectIT

@Test
public void writeTestFailsDatabaseExists() {
  influxDao.active = true;
  Mockito.doThrow(new RuntimeException()).when(influxDb).write(any(String.class), any(String.class), any(Point.class));
  when(influxDb.describeDatabases()).thenReturn(Arrays.asList(influxDao.database));
  influxDao.propertiesUpdated();
  assertThat(influxDao.isConnected(), is(false));
  verify(executor, times(1)).submit(any(Runnable.class));
  verify(influxDb).write(any(String.class), any(String.class), any(Point.class));
  verify(clientFactory).createClient();
  verify(influxDb).describeDatabases();
  verifyNoMoreInteractions(future, influxDb, executor, clientFactory);
}

Code example source: Scrin/RuuviCollector

public LegacyInfluxDBConnection() {
  influxDB = InfluxDBFactory.connect(Config.getInfluxUrl(), Config.getInfluxUser(), Config.getInfluxPassword());
  influxDB.setDatabase(Config.getInfluxDatabase());
  influxDB.enableGzip();
  influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); // TODO: make these configurable
}

Code example source: apache/nifi

protected List<QueryResult> executeQuery(final ProcessContext context, String database, String query, TimeUnit timeunit,
                     int chunkSize) throws InterruptedException {
  final CountDownLatch latch = new CountDownLatch(1);
  InfluxDB influx = getInfluxDB(context);
  Query influxQuery = new Query(query, database);
  if (chunkSize > 0) {
    List<QueryResult> results = new LinkedList<>();
    influx.query(influxQuery, chunkSize, result -> {
      if (isQueryDone(result.getError())) {
        latch.countDown();
      } else {
        results.add(result);
      }
    });
    latch.await();
    return results;
  } else {
    return Collections.singletonList(influx.query(influxQuery, timeunit));
  }
}
