org.apache.hadoop.hbase.client.Put.<init>()方法的使用及代码示例

x33g5p2x  于2022-01-26 转载在 其他  
字(11.7k)|赞(0)|评价(0)|浏览(179)

本文整理了Java中org.apache.hadoop.hbase.client.Put.<init>()方法的一些代码示例,展示了Put.<init>()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Put.<init>()方法的具体详情如下:
包路径:org.apache.hadoop.hbase.client.Put
类名称:Put
方法名:<init>

Put.<init>介绍

[英]Constructor for Writable. DO NOT USE
[中]可写的构造函数。不要使用

代码示例

代码示例来源:origin: apache/hbase

/** Writes ten rows ("row0".."row9"), storing each row key as its own cell value. */
private void loadData(Table t, byte[] family, byte[] column) throws IOException {
 for (int rowIndex = 0; rowIndex < 10; rowIndex++) {
  byte[] rowKey = Bytes.toBytes("row" + rowIndex);
  Put put = new Put(rowKey);
  put.addColumn(family, column, rowKey);
  t.put(put);
 }
}

代码示例来源:origin: apache/storm

@Override
public void multiPut(List<List<Object>> keys, List<T> values) {
    // Build one Put per key, then write the whole batch to HBase at once.
    List<Put> puts = new ArrayList<Put>(keys.size());
    for (int i = 0; i < keys.size(); i++) {
        byte[] hbaseKey = this.options.mapMapper.rowKey(keys.get(i));
        String qualifier = this.options.mapMapper.qualifier(keys.get(i));
        LOG.info("Partition: {}, Key: {}, Value: {}",
             new Object[]{ this.partitionNum, new String(hbaseKey), new String(this.serializer.serialize(values.get(i))) });
        Put put = new Put(hbaseKey);
        T val = values.get(i);
        // NOTE(review): getBytes() uses the platform default charset here; consider an
        // explicit charset (e.g. UTF-8) — left unchanged to preserve existing row data.
        put.add(this.options.columnFamily.getBytes(),
            qualifier.getBytes(),
            this.serializer.serialize(val));
        puts.add(put);
    }
    try {
        this.table.put(puts);
    } catch (InterruptedIOException e) {
        // Restore the interrupt flag so callers further up the stack can observe it;
        // the original code silently swallowed the interrupted status.
        Thread.currentThread().interrupt();
        throw new FailedException("Interrupted while writing to HBase", e);
    } catch (RetriesExhaustedWithDetailsException e) {
        // Fixed typo in the original message ("exhaused").
        throw new FailedException("Retries exhausted while writing to HBase", e);
    } catch (IOException e) {
        throw new FailedException("IOException while writing to HBase", e);
    }
}

代码示例来源:origin: apache/hbase

/** Ensures the reserved system label row is present in the labels region and cache. */
protected void addSystemLabel(Region region, Map<String, Integer> labels,
  Map<String, List<Integer>> userAuths) throws IOException {
 // Nothing to do when the system label has already been registered.
 if (labels.containsKey(SYSTEM_LABEL)) {
  return;
 }
 byte[] labelRow = Bytes.toBytes(SYSTEM_LABEL_ORDINAL);
 Put put = new Put(labelRow);
 put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(labelRow)
        .setFamily(LABELS_TABLE_FAMILY)
        .setQualifier(LABEL_QUALIFIER)
        .setTimestamp(put.getTimestamp())
        .setType(Type.Put)
        .setValue(Bytes.toBytes(SYSTEM_LABEL))
        .build());
 region.put(put);
 labels.put(SYSTEM_LABEL, SYSTEM_LABEL_ORDINAL);
}

代码示例来源:origin: apache/hbase

@Test
public void testRowIsImmutableOrNot() {
 byte[] rowKey = Bytes.toBytes("immutable");
 // An immutable row key must be shared by reference — no defensive copy.
 Put immutablePut = new Put(rowKey, true);
 assertTrue(rowKey == immutablePut.getRow());
 // A mutable row key must be defensively copied into the Put.
 Put mutablePut = new Put(rowKey, 1000L, false);
 assertTrue(rowKey != mutablePut.getRow());
}

代码示例来源:origin: apache/hbase

@Test
public void testSimple() throws Exception {
 AsyncTable<?> table = getTable.get();
 // Write a single cell, then verify it is visible and readable.
 table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).get();
 assertTrue(table.exists(new Get(row).addColumn(FAMILY, QUALIFIER)).get());
 Result fetched = table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get();
 assertArrayEquals(VALUE, fetched.getValue(FAMILY, QUALIFIER));
 // Delete the row and verify it is gone.
 table.delete(new Delete(row)).get();
 fetched = table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get();
 assertTrue(fetched.isEmpty());
 assertFalse(table.exists(new Get(row).addColumn(FAMILY, QUALIFIER)).get());
}

代码示例来源:origin: apache/hbase

@Override
 void testRow(final int i) throws IOException {
  // Write one row with randomly generated cell data; optionally attach tags.
  byte[] rowKey = format(i);
  Put put = new Put(rowKey);
  byte[] cellValue = generateData(this.rand, ROW_LENGTH);
  if (!useTags) {
   put.addColumn(FAMILY_NAME, QUALIFIER_NAME, cellValue);
  } else {
   // Attach noOfTags tags sharing the same payload but with distinct type bytes.
   byte[] tagData = generateData(this.rand, TAG_LENGTH);
   Tag[] tags = new Tag[noOfTags];
   for (int t = 0; t < noOfTags; t++) {
    tags[t] = new ArrayBackedTag((byte) t, tagData);
   }
   put.add(new KeyValue(rowKey, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
     cellValue, tags));
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
 }
}

代码示例来源:origin: apache/hbase

/** Serializes the namespace descriptor and stores it under its name in hbase:meta. */
public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns)
  throws IOException {
 byte[] nsRow = Bytes.toBytes(ns.getName());
 byte[] serialized = ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray();
 // Row key is treated as immutable (true) — no defensive copy is taken.
 Put put = new Put(nsRow, true);
 put.addColumn(HConstants.NAMESPACE_FAMILY, HConstants.NAMESPACE_COL_DESC_QUALIFIER,
   serialized);
 try (Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
  metaTable.put(put);
 }
}

代码示例来源:origin: apache/hive

public void testHBaseSerDeWithBackwardEvolvedSchema() throws SerDeException, IOException {
 byte[] columnFamily = "cola".getBytes();
 byte[] avroQualifier = "avro".getBytes();
 byte[] rowKey = Bytes.toBytes("test-row1");
 // Assemble the Result the SerDe will deserialize.
 byte[] avroData = getTestAvroBytesFromSchema(RECORD_SCHEMA_EVOLVED);
 List<Cell> cells = new ArrayList<Cell>();
 cells.add(new KeyValue(rowKey, columnFamily, avroQualifier, avroData));
 Result result = Result.create(cells);
 // Post serialization, separators are automatically inserted between different fields in
 // the struct and there is currently no way to disable that; the workaround is to build
 // the Put via a raw KeyValue so the padded bytes go in unchanged.
 Put put = new Put(rowKey);
 put.add(new KeyValue(rowKey, columnFamily, avroQualifier, avroData));
 Object[] expectedFieldsData = {new String("test-row1"), new String("[[42, true, 42432234234]]")};
 // Create, initialize, and exercise the SerDe round-trip.
 HBaseSerDe serDe = new HBaseSerDe();
 Configuration conf = new Configuration();
 Properties tbl = createPropertiesForHiveAvroBackwardEvolvedSchema();
 serDe.initialize(conf, tbl);
 deserializeAndSerializeHiveAvro(serDe, result, put, expectedFieldsData,
   EXPECTED_DESERIALIZED_AVRO_STRING);
}

代码示例来源:origin: apache/hbase

/** Builds one Put per row spanning every family/qualifier combination with the given value. */
private static List<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
  byte[] value) throws IOException {
 List<Put> puts = new ArrayList<>();
 for (byte[] row : rows) {
  Put put = new Put(row);
  for (byte[] family : families) {
   // The qualifier index doubles as the cell timestamp.
   for (int q = 0; q < qualifiers.length; q++) {
    put.add(new KeyValue(row, family, qualifiers[q], q, value));
   }
  }
  puts.add(put);
 }
 return puts;
}

代码示例来源:origin: apache/hbase

@Override
 void testRow(final int i) throws IOException {
  // Write one randomly keyed row with random cell data; optionally attach tags.
  byte[] rowKey = getRandomRow(this.rand, this.totalRows);
  Put put = new Put(rowKey);
  byte[] cellValue = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
   byte[] tagData = generateData(this.rand, TAG_LENGTH);
   // All tags share the same payload but carry distinct type bytes.
   Tag[] tags = new Tag[noOfTags];
   for (int idx = 0; idx < noOfTags; idx++) {
    tags[idx] = new ArrayBackedTag((byte) idx, tagData);
   }
   KeyValue cell = new KeyValue(rowKey, FAMILY_NAME, QUALIFIER_NAME,
     HConstants.LATEST_TIMESTAMP, cellValue, tags);
   put.add(cell);
  } else {
   put.addColumn(FAMILY_NAME, QUALIFIER_NAME, cellValue);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
 }
}

代码示例来源:origin: apache/hbase

/**
 * Tests that a put on a table throws {@link SocketTimeoutException} when the operation takes
 * longer than 'hbase.client.operation.timeout'.
 */
@Test(expected = SocketTimeoutException.class)
public void testPutTimeout() throws Exception {
 // Delay mutations long enough to exceed the configured operation timeout.
 DELAY_MUTATE = 600;
 table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE));
}

代码示例来源:origin: apache/hive

public void testHBaseSerDeWithForwardEvolvedSchema() throws SerDeException, IOException {
 byte[] columnFamily = "cola".getBytes();
 byte[] avroQualifier = "avro".getBytes();
 byte[] rowKey = Bytes.toBytes("test-row1");
 // Assemble the Result the SerDe will deserialize.
 byte[] avroData = getTestAvroBytesFromSchema(RECORD_SCHEMA);
 List<Cell> cells = new ArrayList<Cell>();
 cells.add(new KeyValue(rowKey, columnFamily, avroQualifier, avroData));
 Result result = Result.create(cells);
 // Post serialization, separators are automatically inserted between different fields in
 // the struct and there is currently no way to disable that; the workaround is to build
 // the Put via a raw KeyValue so the padded bytes go in unchanged.
 Put put = new Put(rowKey);
 put.add(new KeyValue(rowKey, columnFamily, avroQualifier, avroData));
 Object[] expectedFieldsData = {new String("test-row1"),
   new String("[[42, test, true, 42432234234]]")};
 // Create, initialize, and exercise the SerDe round-trip.
 HBaseSerDe serDe = new HBaseSerDe();
 Configuration conf = new Configuration();
 Properties tbl = createPropertiesForHiveAvroForwardEvolvedSchema();
 serDe.initialize(conf, tbl);
 deserializeAndSerializeHiveAvro(serDe, result, put, expectedFieldsData,
   EXPECTED_DESERIALIZED_AVRO_STRING_3);
}

代码示例来源:origin: apache/hbase

/** One Put per row; each Put carries a cell for every family/qualifier pair. */
private static List<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
  byte[] value) throws IOException {
 List<Put> result = new ArrayList<>();
 for (int r = 0; r < rows.length; r++) {
  Put rowPut = new Put(rows[r]);
  for (int f = 0; f < families.length; f++) {
   for (int q = 0; q < qualifiers.length; q++) {
    // The qualifier index serves as the cell timestamp.
    rowPut.add(new KeyValue(rows[r], families[f], qualifiers[q], q, value));
   }
  }
  result.add(rowPut);
 }
 return result;
}

代码示例来源:origin: apache/hbase

/** Writes rows [start, end): each row's key and value encode the integer, all at timestamp ts. */
private void put(int start, int end, long ts) throws IOException {
 for (int key = start; key < end; key++) {
  Put p = new Put(Bytes.toBytes(key)).addColumn(FAMILY, QUALIFIER, ts, Bytes.toBytes(key));
  TABLE.put(p);
 }
}

代码示例来源:origin: apache/hive

public void testHBaseSerDeWithAvroSchemaInline() throws SerDeException, IOException {
 byte[] columnFamily = "cola".getBytes();
 byte[] avroQualifier = "avro".getBytes();
 byte[] rowKey = Bytes.toBytes("test-row1");
 // Assemble the Result the SerDe will deserialize.
 byte[] avroData = getTestAvroBytesFromSchema(RECORD_SCHEMA);
 List<Cell> cells = new ArrayList<Cell>();
 cells.add(new KeyValue(rowKey, columnFamily, avroQualifier, avroData));
 Result result = Result.create(cells);
 // Post serialization, separators are automatically inserted between different fields in
 // the struct and there is currently no way to disable that; the workaround is to build
 // the Put via a raw KeyValue so the padded bytes go in unchanged.
 Put put = new Put(rowKey);
 put.add(new KeyValue(rowKey, columnFamily, avroQualifier, avroData));
 Object[] expectedFieldsData = {new String("test-row1"), new String("[[42, true, 42432234234]]")};
 // Create, initialize, and exercise the SerDe round-trip.
 HBaseSerDe serDe = new HBaseSerDe();
 Configuration conf = new Configuration();
 Properties tbl = createPropertiesForHiveAvroSchemaInline();
 serDe.initialize(conf, tbl);
 deserializeAndSerializeHiveAvro(serDe, result, put, expectedFieldsData,
   EXPECTED_DESERIALIZED_AVRO_STRING);
}

代码示例来源:origin: apache/hbase

/** Creates one Put per row containing every family/qualifier combination. */
private static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
  byte[] value) throws IOException {
 ArrayList<Put> puts = new ArrayList<>();
 for (int r = 0; r < rows.length; r++) {
  Put put = new Put(rows[r]);
  for (int f = 0; f < families.length; f++) {
   for (int q = 0; q < qualifiers.length; q++) {
    // The qualifier index is reused as the cell timestamp.
    put.add(new KeyValue(rows[r], families[f], qualifiers[q], q, value));
   }
  }
  puts.add(put);
 }
 return puts;
}

代码示例来源:origin: apache/hbase

/** A put against the no-retry table must surface DoNotRetryIOException immediately. */
@Test(expected = DoNotRetryIOException.class)
public void testPutWithDoNotRetryIOException() throws Exception {
 Put put = new Put(Bytes.toBytes("row"));
 put.addColumn(CF, CQ, Bytes.toBytes("value"));
 tableDoNotRetry.put(put);
}

代码示例来源:origin: apache/hbase

/** Verifies whether the region requests a flush after the memstore ages, per the given conf. */
protected void checkShouldFlush(Configuration conf, boolean expected) throws Exception {
 try {
  // Inject a controllable clock so the flush-age checks are deterministic.
  EnvironmentEdgeForMemstoreTest clock = new EnvironmentEdgeForMemstoreTest();
  EnvironmentEdgeManager.injectEdge(clock);
  HBaseTestingUtility util = HBaseTestingUtility.createLocalHTU(conf);
  String family = "foo";
  HRegion region =
    util.createTestRegion("foobar", ColumnFamilyDescriptorBuilder.of(family));
  clock.setCurrentTimeMillis(1234);
  Put put = new Put(Bytes.toBytes("r"));
  put.add(KeyValueTestUtil.create("r", family, "q", 100, "v"));
  region.put(put);
  // Shortly after the write, no flush should be requested yet.
  clock.setCurrentTimeMillis(1234 + 100);
  StringBuilder reason = new StringBuilder();
  assertTrue(!region.shouldFlush(reason));
  // Much later, the outcome depends on the configuration under test.
  clock.setCurrentTimeMillis(1234 + 10000);
  assertTrue(region.shouldFlush(reason) == expected);
 } finally {
  // Always restore the real clock, even if an assertion fails.
  EnvironmentEdgeManager.reset();
 }
}

代码示例来源:origin: apache/hbase

/**
 * Make puts to put the input value into each combination of row, family, and qualifier
 */
static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
  byte[] value) throws IOException {
 ArrayList<Put> puts = new ArrayList<>();
 for (byte[] row : rows) {
  Put put = new Put(row);
  for (byte[] family : families) {
   for (int ts = 0; ts < qualifiers.length; ts++) {
    // The qualifier index doubles as the cell timestamp.
    put.add(new KeyValue(row, family, qualifiers[ts], ts, value));
   }
  }
  puts.add(put);
 }
 return puts;
}

代码示例来源:origin: apache/hbase

/** A put that keeps failing with IOException must end in RetriesExhaustedException. */
@Test(expected = RetriesExhaustedException.class)
public void testPutWithIOException() throws Exception {
 Put put = new Put(Bytes.toBytes("row"));
 put.addColumn(CF, CQ, Bytes.toBytes("value"));
 tableRetry.put(put);
}

相关文章