本文整理了Java中org.datavec.api.writable.Writable
类的一些代码示例,展示了Writable
类的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Writable
类的具体详情如下:
包路径:org.datavec.api.writable.Writable
类名称:Writable
暂无
代码示例来源:origin: org.datavec/datavec-spark_2.11
switch (schema.getColumnTypes().get(i)) {
case Double:
values[i + 2] = step.get(i).toDouble();
break;
case Integer:
values[i + 2] = step.get(i).toInt();
break;
case Long:
values[i + 2] = step.get(i).toLong();
break;
case Float:
values[i + 2] = step.get(i).toFloat();
break;
default:
代码示例来源:origin: org.datavec/datavec-data-nlp
/**
 * Serializes every {@code Text} writable in the record and returns the combined bytes as a String.
 * Non-Text writables are skipped entirely.
 *
 * @param record the writables making up one record; only {@code Text} entries contribute output
 * @return the concatenated serialized form of all Text writables, decoded as UTF-8
 */
protected String toString(Collection<Writable> record) {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    for (Writable w : record) {
        // Only Text writables carry string content we want to emit.
        if (w instanceof Text) {
            try {
                w.write(dos);
            } catch (IOException e) {
                // Best-effort: a writable that fails to serialize is skipped rather than
                // aborting the whole record (preserves the original lenient behavior).
                e.printStackTrace();
            }
        }
    }
    // Fix: decode with an explicit charset. The original used the platform default charset,
    // which silently varies by JVM/OS; Text content is UTF-8, so decode as UTF-8.
    return new String(bos.toByteArray(), java.nio.charset.StandardCharsets.UTF_8);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-datavec-iterators
int classIdx = w.toInt();
if (classIdx >= details.oneHotNumClasses) {
throw new IllegalStateException("Cannot convert sequence writables to one-hot: class index " + classIdx
"indexed, thus only values 0 to nClasses-1 are valid)");
arr.putScalar(i, w.toInt(), 1.0);
} else {
k += toPut.length();
} else {
arr.putScalar(i, k, w.toDouble());
k++;
代码示例来源:origin: org.datavec/datavec-spark_2.11
@Override
public double call(Writable writable) throws Exception {
    // Spark map function: reduce the writable to its numeric (double) representation.
    final double numericValue = writable.toDouble();
    return numericValue;
}
}
代码示例来源:origin: org.datavec/datavec-spark
@Override
public int compare(List<Writable> o1, List<Writable> o2) {
    // Order records by the integer value of their second column (index 1).
    final int left = o1.get(1).toInt();
    final int right = o2.get(1).toInt();
    return Integer.compare(left, right);
}
});
代码示例来源:origin: org.datavec/datavec-spark
@Override
public LongAnalysisCounter add(Writable writable) {
    final long value = writable.toLong();

    // Track how many exact zeros have been seen.
    if (value == 0) {
        countZero++;
    }

    // Occurrence count of the current minimum; a strictly smaller value restarts it at 1.
    if (value == getMinValueSeen()) {
        countMinValue++;
    } else if (value < getMinValueSeen()) {
        countMinValue = 1;
    }

    // Occurrence count of the current maximum; a strictly larger value restarts it at 1.
    if (value == getMaxValueSeen()) {
        countMaxValue++;
    } else if (value > getMaxValueSeen()) {
        countMaxValue = 1;
    }

    // Note: zero is classified as positive here — preserved from the original logic.
    if (value >= 0) {
        countPositive++;
    } else {
        countNegative++;
    }

    // Merge after the min/max checks so they compare against the pre-update state.
    counter.merge((double) value);
    return this;
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-core
int classIdx = w.toInt();
if (classIdx >= details.oneHotNumClasses) {
throw new DL4JException("Cannot convert sequence writables to one-hot: class index " + classIdx
+ " >= numClass (" + details.oneHotNumClasses + ")");
arr.putScalar(i, w.toInt(), 1.0);
} else {
k += toPut.length();
} else {
arr.putScalar(i, k, w.toDouble());
k++;
代码示例来源:origin: org.datavec/datavec-spark
@Override
public double call(Writable writable) throws Exception {
    // Spark map function: reduce the writable to its numeric (double) representation.
    final double numericValue = writable.toDouble();
    return numericValue;
}
}
代码示例来源:origin: org.datavec/datavec-spark
@Override
public int compare(List<Writable> o1, List<Writable> o2) {
    // Compare records on their second column (index 1), interpreted as an int.
    final int first = o1.get(1).toInt();
    final int second = o2.get(1).toInt();
    return Integer.compare(first, second);
}
});
代码示例来源:origin: org.datavec/datavec-spark_2.11
@Override
public LongAnalysisCounter add(Writable writable) {
    final long value = writable.toLong();

    // Count exact zeros.
    if (value == 0) {
        countZero++;
    }

    // Maintain how many times the running minimum has occurred; a new minimum resets to 1.
    if (value == getMinValueSeen()) {
        countMinValue++;
    } else if (value < getMinValueSeen()) {
        countMinValue = 1;
    }

    // Maintain how many times the running maximum has occurred; a new maximum resets to 1.
    if (value == getMaxValueSeen()) {
        countMaxValue++;
    } else if (value > getMaxValueSeen()) {
        countMaxValue = 1;
    }

    // Note: zero counts as positive — preserved from the original logic.
    if (value >= 0) {
        countPositive++;
    } else {
        countNegative++;
    }

    // Merge last so the min/max comparisons above see the pre-update state.
    counter.merge((double) value);
    return this;
}
代码示例来源:origin: org.datavec/datavec-spark
switch (schema.getColumnTypes().get(i)) {
case Double:
values[i + 2] = step.get(i).toDouble();
break;
case Integer:
values[i + 2] = step.get(i).toInt();
break;
case Long:
values[i + 2] = step.get(i).toLong();
break;
case Float:
values[i + 2] = step.get(i).toFloat();
break;
default:
代码示例来源:origin: org.deeplearning4j/deeplearning4j-datavec-iterators
j += row.length();
} else {
arr.putScalar(i, j, k, w.toDouble());
j++;
w = iter.next();
int classIdx = w.toInt();
if (classIdx >= details.oneHotNumClasses) {
throw new IllegalStateException("Cannot convert sequence writables to one-hot: class index " + classIdx
arr.putScalar(i, l++, k, w.toDouble());
代码示例来源:origin: org.datavec/datavec-spark_2.11
@Override
public HistogramCounter add(Writable w) {
    final double value = w.toDouble();

    // Linear scan for the half-open bin [bins[i], bins[i+1]) containing the value.
    // Not super efficient, but linear search on 20-50 items should be good enough.
    int binIndex = -1;
    for (int i = 0; i < nBins; i++) {
        final boolean inBin = value >= bins[i] && value < bins[i + 1];
        if (inBin) {
            binIndex = i;
            break;
        }
    }

    // No bin matched (e.g. value equals the top edge, or lies outside the range):
    // attribute it to the last bin — preserved from the original behavior.
    if (binIndex == -1) {
        binIndex = nBins - 1;
    }

    binCounts[binIndex]++;
    return this;
}
代码示例来源:origin: org.datavec/datavec-spark_2.11
@Override
public int compare(List<Writable> o1, List<Writable> o2) {
    // Records are ordered by the int value of their second column (index 1).
    final int a = o1.get(1).toInt();
    final int b = o2.get(1).toInt();
    return Integer.compare(a, b);
}
});
代码示例来源:origin: org.datavec/datavec-hadoop
for (Writable writable : record) {
Writable newWritable;
if (writable.getType() == WritableType.Text) {
switch (convertTextTo) {
case Byte:
newWritable = new ByteWritable((byte) writable.toInt());
break;
case Double:
newWritable = new DoubleWritable(writable.toDouble());
break;
case Float:
newWritable = new FloatWritable(writable.toFloat());
break;
case Int:
newWritable = new IntWritable(writable.toInt());
break;
case Long:
newWritable = new org.datavec.api.writable.LongWritable(writable.toLong());
break;
default:
代码示例来源:origin: org.deeplearning4j/deeplearning4j-core
j += row.length();
} else {
arr.putScalar(i, j, k, w.toDouble());
j++;
w = iter.next();
int classIdx = w.toInt();
if (classIdx >= details.oneHotNumClasses) {
throw new DL4JException("Cannot convert sequence writables to one-hot: class index " + classIdx
arr.putScalar(i, l++, k, w.toDouble());
代码示例来源:origin: org.datavec/datavec-spark
@Override
public HistogramCounter add(Writable w) {
    final double value = w.toDouble();

    // Find the half-open interval [bins[i], bins[i+1]) the value falls into.
    // Not super efficient, but linear search on 20-50 items should be good enough.
    int matchedBin = -1;
    for (int i = 0; i < nBins; i++) {
        if (value >= bins[i] && value < bins[i + 1]) {
            matchedBin = i;
            break;
        }
    }

    // Nothing matched (top edge or out-of-range value): fold into the last bin,
    // exactly as the original did.
    if (matchedBin == -1) {
        matchedBin = nBins - 1;
    }

    binCounts[matchedBin]++;
    return this;
}
代码示例来源:origin: org.datavec/datavec-spark_2.11
@Override
public int compare(List<Writable> o1, List<Writable> o2) {
    // Sort key is column index 1 of each record, read as an int.
    final int lhs = o1.get(1).toInt();
    final int rhs = o2.get(1).toInt();
    return Integer.compare(lhs, rhs);
}
});
代码示例来源:origin: org.datavec/datavec-spark_2.11
switch (schema.getColumnTypes().get(i)) {
case Double:
values[i] = v1.get(i).toDouble();
break;
case Integer:
values[i] = v1.get(i).toInt();
break;
case Long:
values[i] = v1.get(i).toLong();
break;
case Float:
values[i] = v1.get(i).toFloat();
break;
default:
代码示例来源:origin: org.datavec/datavec-spark_2.11
/**
 * Accumulates one writable's double value into this counter's running statistics
 * (zero/NaN counts, min/max occurrence counts, sign counts, and the merged stats).
 *
 * @param writable the value to fold in, converted via {@code toDouble()}
 * @return this counter, for chaining
 */
@Override
public DoubleAnalysisCounter add(Writable writable) {
    double value = writable.toDouble();

    if (value == 0)
        countZero++;

    // Fix: the original tested (value == Double.NaN), which is ALWAYS false because
    // NaN never compares equal to anything (IEEE 754), so countNaN was never
    // incremented. Double.isNaN is the correct NaN check.
    if (Double.isNaN(value))
        countNaN++;

    // Occurrence count of the current minimum; a strictly smaller value resets it to 1.
    if (value == getMinValueSeen())
        countMinValue++;
    else if (value < getMinValueSeen()) {
        countMinValue = 1;
    }

    // Occurrence count of the current maximum; a strictly larger value resets it to 1.
    if (value == getMaxValueSeen())
        countMaxValue++;
    else if (value > getMaxValueSeen()) {
        countMaxValue = 1;
    }

    // NOTE(review): zero counts as positive, and NaN lands in countNegative
    // (NaN >= 0 is false) — preserved from the original behavior.
    if (value >= 0) {
        countPositive++;
    } else {
        countNegative++;
    }

    // Merge after the min/max checks so they compare against the pre-update state.
    counter.merge(value);
    return this;
}
内容来源于网络,如有侵权,请联系作者删除!