This article collects code examples of the Java method org.apache.hadoop.record.Utils.compareBytes() and shows how Utils.compareBytes() is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they have solid reference value and should be helpful. Details of the Utils.compareBytes() method:
Package: org.apache.hadoop.record
Class: Utils
Method: compareBytes
Description: Lexicographic order of binary data.
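The method compares the byte range b1[s1, s1+l1) against b2[s2, s2+l2) lexicographically and returns a negative, zero, or positive int. Before the project examples, here is a minimal usage sketch (the class name CompareBytesDemo and the sample byte values are illustrative only):
import org.apache.hadoop.record.Utils;

public class CompareBytesDemo {
  public static void main(String[] args) {
    byte[] a = { 1, 2, 3 };
    byte[] b = { 1, 2, 4 };
    // Compare both full ranges: negative means a < b, zero means equal,
    // positive means a > b.
    int r = Utils.compareBytes(a, 0, a.length, b, 0, b.length);
    System.out.println(r < 0 ? "a < b" : (r == 0 ? "a == b" : "a > b")); // a < b
  }
}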
Code example source (origin): io.hops/hadoop-mapreduce-client-core
/**
 * Verify that hash equals HMacHash(msg).
 * @param hash the hash value to check
 * @param msg the message that was hashed
 * @param key the secret key for the HMAC
 * @return true if the computed hash matches
 */
private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
byte[] msg_hash = generateByteHash(msg, key);
return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
}
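The helper generateByteHash is not shown in the snippet. A plausible sketch of such a helper, assuming an HMAC such as HmacSHA1 (the algorithm choice here is an assumption, not taken from the source):
import javax.crypto.Mac;
import javax.crypto.SecretKey;
import java.security.GeneralSecurityException;

// Hypothetical helper: compute an HMAC of msg under key.
private static byte[] generateByteHash(byte[] msg, SecretKey key) {
  try {
    Mac mac = Mac.getInstance("HmacSHA1"); // assumed algorithm
    mac.init(key);
    return mac.doFinal(msg);
  } catch (GeneralSecurityException e) {
    throw new IllegalStateException("HMAC computation failed", e);
  }
}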
Code example source (origin): ch.cern.hadoop/hadoop-mapreduce-client-core
/**
 * Verify that hash equals HMacHash(msg).
 * @param hash the hash value to check
 * @param msg the message that was hashed
 * @param key the secret key for the HMAC
 * @return true if the computed hash matches
 */
private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
byte[] msg_hash = generateByteHash(msg, key);
return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
}
Code example source (origin): com.github.jiayuhan-it/hadoop-mapreduce-client-core
/**
 * Verify that hash equals HMacHash(msg).
 * @param hash the hash value to check
 * @param msg the message that was hashed
 * @param key the secret key for the HMAC
 * @return true if the computed hash matches
 */
private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
byte[] msg_hash = generateByteHash(msg, key);
return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
}
Code example source (origin): org.apache.hadoop/hadoop-mapred
/**
 * Verify that hash equals HMacHash(msg).
 * @param hash the hash value to check
 * @param msg the message that was hashed
 * @param key the secret key for the HMAC
 * @return true if the computed hash matches
 */
private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
byte[] msg_hash = generateByteHash(msg, key);
return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
}
Code example source (origin): io.prestosql.hadoop/hadoop-apache
/**
 * Verify that hash equals HMacHash(msg).
 * @param hash the hash value to check
 * @param msg the message that was hashed
 * @param key the secret key for the HMAC
 * @return true if the computed hash matches
 */
private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
byte[] msg_hash = generateByteHash(msg, key);
return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
}
Code example source (origin): org.apache.hadoop/hadoop-common-test
static public int compareRaw(byte[] b1, int s1, int l1,
                             byte[] b2, int s2, int l2) {
  try {
    int os1 = s1;
    {
      // Decode the VInt length prefix of the field in each buffer,
      // then advance past the prefix bytes.
      int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
      int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
      int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
      int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
      s1 += z1; s2 += z2; l1 -= z1; l2 -= z2;
      // Lexicographic comparison of the two payloads.
      int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2, s2, i2);
      if (r1 != 0) { return (r1 < 0) ? -1 : 1; }
      s1 += i1; s2 += i2; l1 -= i1; l2 -= i2;
    }
    return (os1 - s1);
  } catch (java.io.IOException e) {
    throw new RuntimeException(e);
  }
}
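The comparator above assumes each record is serialized as a VInt length prefix followed by that many payload bytes. A quick hand-built check (buffer contents are illustrative; lengths in -112..127 encode as a single VInt byte in Hadoop's format):
// Two serialized records: a single-byte VInt length prefix, then payload.
byte[] rec1 = { 3, 'a', 'b', 'c' };
byte[] rec2 = { 3, 'a', 'b', 'd' };
// rec1 sorts before rec2, so compareRaw returns -1.
System.out.println(compareRaw(rec1, 0, rec1.length, rec2, 0, rec2.length));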
Code example source (origin): apache/chukwa
// Fragment of generated comparator code: i1/i2 are the decoded VInt length
// prefixes and z1/z2 are the number of bytes those prefixes occupied.
l1 -= z1;
l2 -= z2;
int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
    s2, i2);
if (r1 != 0) {
Code example source (origin): dnmilne/wikipediaminer
// Skip the VInt length prefixes, then compare the payload bytes.
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1 += z1; s2 += z2; l1 -= z1; l2 -= z2;
int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2, s2, i2);
if (r1 != 0) { return (r1 < 0) ? -1 : 1; }
s1 += i1; s2 += i2; l1 -= i1; l2 -= i2;
Code example source (origin): kermitt2/entity-fishing
// Same generated pattern: advance past the length prefixes, compare payloads.
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1 += z1; s2 += z2; l1 -= z1; l2 -= z2;
int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2, s2, i2);
if (r1 != 0) { return (r1 < 0) ? -1 : 1; }
s1 += i1; s2 += i2; l1 -= i1; l2 -= i2;
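The fragments above all follow the same generated pattern: readVInt decodes a field's length prefix, getVIntSize reports how many bytes that prefix occupied, and compareBytes compares the payload ranges. A minimal standalone sketch of that pattern (the class name and buffer contents are made up for illustration):
import org.apache.hadoop.record.Utils;
import java.io.IOException;

public class VIntFieldDemo {
  public static void main(String[] args) throws IOException {
    // A field serialized as a VInt length prefix followed by the payload.
    byte[] buf = { 2, 'h', 'i' };
    int len = Utils.readVInt(buf, 0);   // 2
    int z = Utils.getVIntSize(len);     // 1: values in -112..127 use one byte
    // Comparing the payload range against itself yields 0 (equal).
    System.out.println(Utils.compareBytes(buf, z, len, buf, z, len));
  }
}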
This content was collected from the Internet; if it infringes any rights, please contact the author to have it removed.