This article collects Java code examples for the org.apache.hadoop.hbase.client.Put.add() method and shows how the method is used in practice. The examples are drawn from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they carry real-world reference value. Details of Put.add() are as follows:
Package: org.apache.hadoop.hbase.client
Class: Put
Method: add
Description: Add the specified KeyValue to this Put operation. The operation assumes that the passed KeyValue is immutable and that its backing array will not be modified for the duration of this Put.
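For orientation before the collected examples, here is a minimal, self-contained sketch of the two call styles that recur below: the classic Put.add(KeyValue) overload and the Put.add(Cell) overload fed by a CellBuilder. This is not taken from any of the projects below; it assumes an HBase 2.x client, and the class name and row/family/qualifier values are made up for illustration.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAddSketch {  // illustrative class name
  public static void main(String[] args) throws IOException {
    byte[] row = Bytes.toBytes("row1");  // illustrative values
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("q");
    byte[] value = Bytes.toBytes("v");

    Put put = new Put(row);

    // Older style: pass a KeyValue. Its row must match the Put's row,
    // otherwise add() throws an IOException.
    put.add(new KeyValue(row, family, qualifier, value));

    // HBase 2.x style: build a Cell and hand it to add(). SHALLOW_COPY means
    // the builder keeps references to the byte arrays, so they must not be
    // mutated afterwards (the immutability assumption from the Javadoc above).
    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(row)
        .setFamily(family)
        .setQualifier(qualifier)
        .setTimestamp(put.getTimestamp())
        .setType(Cell.Type.Put)
        .setValue(value)
        .build());

    // The assembled Put would then be sent with Table.put(put).
  }
}

For plain single-column writes, put.addColumn(family, qualifier, value) is the usual shortcut (several examples below use it in their else branch); add() is the right choice when you already hold a Cell or KeyValue.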
Code example source: apache/storm
@Override
public void multiPut(List<List<Object>> keys, List<T> values) {
  List<Put> puts = new ArrayList<Put>(keys.size());
  for (int i = 0; i < keys.size(); i++) {
    byte[] hbaseKey = this.options.mapMapper.rowKey(keys.get(i));
    String qualifier = this.options.mapMapper.qualifier(keys.get(i));
    LOG.info("Partition: {}, Key: {}, Value: {}",
        new Object[]{ this.partitionNum, new String(hbaseKey), new String(this.serializer.serialize(values.get(i))) });
    Put put = new Put(hbaseKey);
    T val = values.get(i);
    put.add(this.options.columnFamily.getBytes(),
        qualifier.getBytes(),
        this.serializer.serialize(val));
    puts.add(put);
  }
  try {
    this.table.put(puts);
  } catch (InterruptedIOException e) {
    throw new FailedException("Interrupted while writing to HBase", e);
  } catch (RetriesExhaustedWithDetailsException e) {
    throw new FailedException("Retries exhausted while writing to HBase", e);
  } catch (IOException e) {
    throw new FailedException("IOException while writing to HBase", e);
  }
}
Code example source: apache/hbase
@Override
public Boolean call() throws Exception {
  try (Table t = connection.getTable(tableName)) {
    byte[] value = Bytes.toBytes(Double.toString(ThreadLocalRandom.current().nextDouble()));
    byte[] rk = Bytes.toBytes(ThreadLocalRandom.current().nextLong());
    Put p = new Put(rk);
    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(rk)
        .setFamily(FAMILY)
        .setQualifier(QUAL)
        .setTimestamp(p.getTimestamp())
        .setType(Type.Put)
        .setValue(value)
        .build());
    t.put(p);
  }
  return true;
}
Code example source: apache/hbase
protected void addSystemLabel(Region region, Map<String, Integer> labels,
    Map<String, List<Integer>> userAuths) throws IOException {
  if (!labels.containsKey(SYSTEM_LABEL)) {
    byte[] row = Bytes.toBytes(SYSTEM_LABEL_ORDINAL);
    Put p = new Put(row);
    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(row)
        .setFamily(LABELS_TABLE_FAMILY)
        .setQualifier(LABEL_QUALIFIER)
        .setTimestamp(p.getTimestamp())
        .setType(Type.Put)
        .setValue(Bytes.toBytes(SYSTEM_LABEL))
        .build());
    region.put(p);
    labels.put(SYSTEM_LABEL, SYSTEM_LABEL_ORDINAL);
  }
}
Code example source: apache/hbase
// Excerpt from a row-swap test; code elided on the source page is marked "...".
// (The Delete and KeyValue declarations are reconstructed from the dangling
// constructor arguments in the excerpt.)
  assertEquals(rowSize, kvs2.size());
  assertEquals(row2Size, kvs1.size());
} else {
  assertEquals(rowSize, kvs1.size());
  assertEquals(row2Size, kvs2.size());
}
// ...
Delete d = new Delete(rows[i]);
KeyValue kvDelete =
    new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
        kv.getTimestamp(), KeyValue.Type.Delete);
d.add(kvDelete);
Put p = new Put(rows[1 - i]);
KeyValue kvAdd =
    new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
        now, CellUtil.cloneValue(kv));
p.add(kvAdd);
mutations.add(d);
walEdit.add(kvDelete);
Code example source: apache/hbase
public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException {
  put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(put.getRow())
      .setFamily(HConstants.REPLICATION_BARRIER_FAMILY)
      .setQualifier(HConstants.SEQNUM_QUALIFIER)
      .setTimestamp(put.getTimestamp())
      .setType(Type.Put)
      .setValue(Bytes.toBytes(openSeqNum))
      .build());
}
Code example source: apache/hbase
// Excerpt from a replication sink; code elided on the source page is marked "...".
// (The Delete branch of the ternary is reconstructed from its dangling tail.)
TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
if (this.walEntrySinkFilter != null) {
  if (this.walEntrySinkFilter.filter(table, entry.getKey().getWriteTime())) {
    // ...
  }
}
// ...
mutation = CellUtil.isDelete(cell)
    ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
    : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
// ...
if (CellUtil.isDelete(cell)) {
  ((Delete) mutation).add(cell);
} else {
  ((Put) mutation).add(cell);
}
Code example source: apache/hbase
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, this.totalRows);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new ArrayBackedTag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
Code example source: apache/hbase
// Excerpt from ACL-table update code; code elided on the source page is marked "...".
// (The enclosing try and the builder's .build() call are implied by the excerpt.)
Permission.Action[] actions = permission.getActions();
byte[] rowKey = userPermissionRowKey(permission);
Put p = new Put(rowKey);
byte[] key = userPermissionKey(userPerm);
// ...
value[index++] = action.code();
// ...
p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
    .setRow(p.getRow())
    .setFamily(ACL_LIST_FAMILY)
    // ... (remaining builder calls elided)
    .build());
try {
  t.put(Collections.singletonList(p));
} finally {
  t.close();
}
Code example source: apache/hbase
@Override
public Boolean call() throws Exception {
  // Table implements Closeable so we use the try-with-resources structure here.
  // https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html
  try (Table t = connection.getTable(tableName)) {
    byte[] value = Bytes.toBytes(Double.toString(ThreadLocalRandom.current().nextDouble()));
    int rows = 30;
    // List to hold the batch
    ArrayList<Put> puts = new ArrayList<>(rows);
    for (int i = 0; i < 30; i++) {
      byte[] rk = Bytes.toBytes(ThreadLocalRandom.current().nextLong());
      Put p = new Put(rk);
      p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
          .setRow(rk)
          .setFamily(FAMILY)
          .setQualifier(QUAL)
          .setTimestamp(p.getTimestamp())
          .setType(Cell.Type.Put)
          .setValue(value)
          .build());
      puts.add(p);
    }
    // Now that we've assembled the batch it's time to push it to HBase.
    t.put(puts);
  }
  return true;
}
Code example source: apache/hbase
// Excerpt from a memstore-flush test; code elided on the source page is marked "...".
Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
FaultyFileSystem ffs = (FaultyFileSystem) fs;
HRegion region = null;
// ... (region initialization elided; the call's trailing arguments are
//      Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES)
long size = region.getMemStoreDataSize();
Assert.assertEquals(0, size);
Put p1 = new Put(row);
p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null));
region.put(p1);
storeFlushCtx.prepare();
Put p2 = new Put(row);
p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null));
p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null));
region.put(p2);
Code example source: apache/hive
public void testHBaseSerDeWithBackwardEvolvedSchema() throws SerDeException, IOException {
  byte[] cfa = "cola".getBytes();
  byte[] qualAvro = "avro".getBytes();
  byte[] rowKey = Bytes.toBytes("test-row1");
  // Data
  List<Cell> kvs = new ArrayList<Cell>();
  byte[] avroData = getTestAvroBytesFromSchema(RECORD_SCHEMA_EVOLVED);
  kvs.add(new KeyValue(rowKey, cfa, qualAvro, avroData));
  Result r = Result.create(kvs);
  Put p = new Put(rowKey);
  // Post serialization, separators are automatically inserted between different fields in the
  // struct. Currently there is no way to disable that, so the workaround here is to pad the
  // data with the separator bytes before creating a "Put" object.
  p.add(new KeyValue(rowKey, cfa, qualAvro, avroData));
  Object[] expectedFieldsData = {new String("test-row1"), new String("[[42, true, 42432234234]]")};
  // Create, initialize, and test the SerDe
  HBaseSerDe serDe = new HBaseSerDe();
  Configuration conf = new Configuration();
  Properties tbl = createPropertiesForHiveAvroBackwardEvolvedSchema();
  serDe.initialize(conf, tbl);
  deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData,
      EXPECTED_DESERIALIZED_AVRO_STRING);
}
Code example source: apache/hbase
private static List<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
    byte[] value) throws IOException {
  List<Put> puts = new ArrayList<>();
  for (int row = 0; row < rows.length; row++) {
    Put put = new Put(rows[row]);
    for (int fam = 0; fam < families.length; fam++) {
      for (int qual = 0; qual < qualifiers.length; qual++) {
        KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
        put.add(kv);
      }
    }
    puts.add(put);
  }
  return puts;
}
Code example source: apache/hbase
private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException {
  return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(p.getRow())
      .setFamily(HConstants.CATALOG_FAMILY)
      .setQualifier(getSeqNumColumn(replicaId))
      .setTimestamp(p.getTimestamp())
      .setType(Type.Put)
      .setValue(Bytes.toBytes(openSeqNum))
      .build());
}
Code example source: apache/hbase
// Excerpt from WAL-entry replay code; code elided on the source page is marked "...".
// (The if (CellUtil.isDelete(cell)) wrapper around the add() calls is reconstructed
// from the dangling else branch in the excerpt.)
// ... (tail of a mutations.add(...) call for the Delete case:)
      MutationType.DELETE, m, HConstants.NO_NONCE, HConstants.NO_NONCE));
} else {
  m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
  // ...
}
// ...
if (CellUtil.isDelete(cell)) {
  ((Delete) m).add(cell);
} else {
  ((Put) m).add(cell);
}
// ...
clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
key = new WALKeyImpl(walKeyProto.getEncodedRegionName().toByteArray(), TableName.valueOf(
    walKeyProto.getTableName().toByteArray()), replaySeqId, walKeyProto.getWriteTime(),
    clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(), null);
Code example source: apache/hbase
@Override
void testRow(final int i) throws IOException {
  byte[] row = format(i);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new ArrayBackedTag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
Code example source: apache/hbase
// Excerpt from a Thrift gateway handler; code elided on the source page is marked "...".
try {
  table = getTable(tableName);
  Put put = new Put(getBytes(row), timestamp);
  addAttributes(put, attributes);
  // ... (a branch rejecting family-wide mutations is elided; its message ends
  //      with "over the whole column family.")
  } else {
    put.add(builder.clear()
        .setRow(put.getRow())
        .setFamily(famAndQf[0])
        // ... (remaining builder calls elided)
        .build());
  }
  // ...
  table.put(put);
Code example source: apache/hbase
@Test
public void testAddKeyValue() throws IOException {
  final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents");
  final byte[] value = Bytes.toBytes("abcd");
  final byte[] row1 = Bytes.toBytes("row1");
  final byte[] row2 = Bytes.toBytes("row2");
  byte[] qualifier = Bytes.toBytes("qf1");
  Put put = new Put(row1);
  // Adding a KeyValue with the same row
  KeyValue kv = new KeyValue(row1, CONTENTS_FAMILY, qualifier, value);
  boolean ok = true;
  try {
    put.add(kv);
  } catch (IOException e) {
    ok = false;
  }
  assertEquals(true, ok);
  // Adding a KeyValue with a different row
  kv = new KeyValue(row2, CONTENTS_FAMILY, qualifier, value);
  ok = false;
  try {
    put.add(kv);
  } catch (IOException e) {
    ok = true;
  }
  assertEquals(true, ok);
}
Code example source: apache/hive
public void testHBaseSerDeWithAvroSchemaInline() throws SerDeException, IOException {
  byte[] cfa = "cola".getBytes();
  byte[] qualAvro = "avro".getBytes();
  byte[] rowKey = Bytes.toBytes("test-row1");
  // Data
  List<Cell> kvs = new ArrayList<Cell>();
  byte[] avroData = getTestAvroBytesFromSchema(RECORD_SCHEMA);
  kvs.add(new KeyValue(rowKey, cfa, qualAvro, avroData));
  Result r = Result.create(kvs);
  Put p = new Put(rowKey);
  // Post serialization, separators are automatically inserted between different fields in the
  // struct. Currently there is no way to disable that, so the workaround here is to pad the
  // data with the separator bytes before creating a "Put" object.
  p.add(new KeyValue(rowKey, cfa, qualAvro, avroData));
  Object[] expectedFieldsData = {new String("test-row1"), new String("[[42, true, 42432234234]]")};
  // Create, initialize, and test the SerDe
  HBaseSerDe serDe = new HBaseSerDe();
  Configuration conf = new Configuration();
  Properties tbl = createPropertiesForHiveAvroSchemaInline();
  serDe.initialize(conf, tbl);
  deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData,
      EXPECTED_DESERIALIZED_AVRO_STRING);
}
Code example source: apache/hbase
private static void addRegionStateToPut(Put put, RegionState.State state) throws IOException {
  put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(put.getRow())
      .setFamily(HConstants.CATALOG_FAMILY)
      .setQualifier(getRegionStateColumn())
      .setTimestamp(put.getTimestamp())
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes(state.name()))
      .build());
}