This article collects a number of Java code examples for the org.nd4j.linalg.factory.Nd4j.getExecutioner() method and shows how Nd4j.getExecutioner() is used in practice. The examples are taken from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they make useful references. Details of the Nd4j.getExecutioner() method are as follows:
Package: org.nd4j.linalg.factory.Nd4j
Class: Nd4j
Method: getExecutioner
Description: Get the operation executioner instance
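Before the examples, here is a minimal sketch (not taken from any of the projects below) of the typical call pattern: Nd4j.getExecutioner() returns the backend-specific OpExecutioner, and ops are passed to its exec()/execAndReturn() methods. The Negative op and its import path are assumptions used for illustration and may differ between ND4J versions.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.executioner.OpExecutioner;
import org.nd4j.linalg.api.ops.impl.transforms.Negative; // assumed path; varies by ND4J version
import org.nd4j.linalg.factory.Nd4j;

public class GetExecutionerSketch {
    public static void main(String[] args) {
        INDArray x = Nd4j.linspace(1, 4, 4);         // [1.0, 2.0, 3.0, 4.0]
        OpExecutioner exec = Nd4j.getExecutioner();  // backend-specific executioner (CPU/GPU)
        exec.execAndReturn(new Negative(x));         // runs the op in place on x
        System.out.println(x);                       // [-1.0, -2.0, -3.0, -4.0]
    }
}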
Code example source: deeplearning4j/nd4j
@Override
public INDArray nextFloat(char order, int[] shape) {
    INDArray array = Nd4j.createUninitialized(shape, order);
    UniformDistribution op = new UniformDistribution(array, 0.0, 1.0);
    Nd4j.getExecutioner().exec(op, this);
    return array;
}
Code example source: deeplearning4j/nd4j
/**
 * Sin function
 * @param in   the input array
 * @param copy if true, operate on a copy of {@code in}; if false, operate in place
 * @return the resulting array
 */
public static INDArray sin(INDArray in, boolean copy) {
    return Nd4j.getExecutioner().execAndReturn(new Sin((copy ? in.dup() : in)));
}
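A hypothetical usage sketch of the static helper above (assumed to be org.nd4j.linalg.ops.transforms.Transforms.sin; the class name is an assumption, and the usual Nd4j/INDArray imports are omitted):

INDArray x = Nd4j.create(new double[] {0.0, Math.PI / 2, Math.PI});
INDArray y = Transforms.sin(x, true);   // copy = true: x is left untouched
Transforms.sin(x, false);               // copy = false: x is modified in place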
Code example source: deeplearning4j/nd4j
@Override
public INDArray getActivation(INDArray in, boolean training) {
    Nd4j.getExecutioner().execAndReturn(new SoftSign(in));
    return in;
}
Code example source: deeplearning4j/nd4j
/**
 * Returns non-normalized Shannon entropy along dimension
 * @param dimension the dimension(s) to compute the entropy along
 * @return the non-normalized Shannon entropy along the specified dimension(s)
 */
@Override
public INDArray shannonEntropy(int... dimension) {
    return Nd4j.getExecutioner().exec(new ShannonEntropy(this), dimension);
}
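A hypothetical usage sketch of the INDArray method above (Nd4j/INDArray imports omitted):

INDArray p = Nd4j.create(new double[] {0.25, 0.25, 0.5});
INDArray h = p.shannonEntropy(0);   // entropy reduced along dimension 0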
Code example source: deeplearning4j/nd4j
public static void checkForInf(INDArray z) {
    if (Nd4j.getExecutioner().getProfilingMode() != OpExecutioner.ProfilingMode.INF_PANIC
            && Nd4j.getExecutioner().getProfilingMode() != OpExecutioner.ProfilingMode.ANY_PANIC)
        return;

    int match = 0;
    if (!z.isScalar()) {
        MatchCondition condition = new MatchCondition(z, Conditions.isInfinite());
        match = Nd4j.getExecutioner().exec(condition, Integer.MAX_VALUE).getInt(0);
    } else {
        if (z.data().dataType() == DataBuffer.Type.DOUBLE) {
            if (Double.isInfinite(z.getDouble(0)))
                match = 1;
        } else {
            if (Float.isInfinite(z.getFloat(0)))
                match = 1;
        }
    }

    if (match > 0)
        throw new ND4JIllegalStateException("P.A.N.I.C.! Op.Z() contains " + match + " Inf value(s)");
}
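checkForInf only does work when the executioner's profiling mode is INF_PANIC or ANY_PANIC; otherwise it returns immediately. A hypothetical way to turn that mode on (setProfilingMode is assumed to be available on the executioner in this ND4J version):

Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.INF_PANIC);
// From now on, ops whose result contains Inf trigger an ND4JIllegalStateException.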
Code example source: deeplearning4j/nd4j
@Override
public INDArray compress(INDArray array) {
    INDArray dup = array.dup(array.ordering());
    Nd4j.getExecutioner().commit();
    dup.setData(compress(dup.data()));
    dup.markAsCompressed(true);
    return dup;
}
Code example source: deeplearning4j/nd4j
/**
 * Log on arbitrary base
 *
 * @param ndArray   the input array
 * @param base      the logarithm base
 * @param duplicate if true, operate on a duplicate of {@code ndArray}; if false, operate in place
 * @return the resulting array
 */
public static INDArray log(INDArray ndArray, double base, boolean duplicate) {
    return Nd4j.getExecutioner().exec(new LogX(duplicate ? ndArray.dup(ndArray.ordering()) : ndArray, base)).z();
}
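A hypothetical usage sketch, assuming the method above is Transforms.log(INDArray, double, boolean) (class name assumed; imports omitted):

INDArray x = Nd4j.create(new double[] {1.0, 10.0, 100.0});
INDArray y = Transforms.log(x, 10.0, true);   // base-10 log of a duplicate; x is unchanged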
Code example source: deeplearning4j/nd4j
@Override
public int iamax(long n, INDArray arr, int stride) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, arr);

    if (arr.data().dataType() == DataBuffer.Type.DOUBLE) {
        DefaultOpExecutioner.validateDataType(DataBuffer.Type.DOUBLE, arr);
        return idamax(n, arr, stride);
    } else {
        DefaultOpExecutioner.validateDataType(DataBuffer.Type.FLOAT, arr);
        return isamax(n, arr, stride);
    }
}
Code example source: deeplearning4j/nd4j
/**
 * Atan2 operation; a new INDArray instance will be returned.
 * Note the order of the x and y parameters is opposite to that of java.lang.Math.atan2
 *
 * @param x the abscissa coordinate
 * @param y the ordinate coordinate
 * @return the theta from point (r, theta) when converting (x, y) from cartesian to polar coordinates
 */
public static INDArray atan2(@NonNull INDArray x, @NonNull INDArray y) {
    return Nd4j.getExecutioner()
            .execAndReturn(new OldAtan2Op(x, y, Nd4j.createUninitialized(x.shape(), x.ordering())));
}
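The (x, y) parameter order is the reverse of java.lang.Math.atan2(y, x). A hypothetical comparison, assuming the method above is Transforms.atan2 (class name assumed; imports omitted):

double scalarTheta = Math.atan2(1.0, 0.0);   // y first, then x -> pi/2
INDArray xs = Nd4j.create(new double[] {0.0});
INDArray ys = Nd4j.create(new double[] {1.0});
INDArray theta = Transforms.atan2(xs, ys);   // x first, then y -> [pi/2]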
Code example source: deeplearning4j/nd4j
/**
 * Atanh (inverse hyperbolic tangent) function
 * @param in   the input array
 * @param copy if true, operate on a copy of {@code in}; if false, operate in place
 * @return the resulting array
 */
public static INDArray atanh(INDArray in, boolean copy) {
    return Nd4j.getExecutioner().execAndReturn(new ATanh((copy ? in.dup() : in)));
}
Code example source: deeplearning4j/nd4j
/**
* Negate each element (in-place).
*/
@Override
public INDArray negi() {
    Nd4j.getExecutioner().exec(new Negative(this));
    return this;
}
Code example source: deeplearning4j/nd4j
@Override
public INDArray nextGaussian(char order, int[] shape) {
    INDArray array = Nd4j.createUninitialized(shape, order);
    GaussianDistribution op = new GaussianDistribution(array, 0.0, 1.0);
    Nd4j.getExecutioner().exec(op, this);
    return array;
}
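A hypothetical usage sketch, assuming the snippet above implements the org.nd4j.linalg.api.rng.Random interface reachable through Nd4j.getRandom() (imports omitted):

INDArray gaussian = Nd4j.getRandom().nextGaussian('c', new int[] {2, 3});   // 2x3 samples from N(0, 1)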
Code example source: deeplearning4j/nd4j
@Override
public INDArray getActivation(INDArray in, boolean training) {
    Nd4j.getExecutioner().execAndReturn(new RectifiedTanh(in));
    return in;
}
Code example source: deeplearning4j/nd4j
public static void checkForNaN(INDArray z) {
    if (Nd4j.getExecutioner().getProfilingMode() != OpExecutioner.ProfilingMode.NAN_PANIC
            && Nd4j.getExecutioner().getProfilingMode() != OpExecutioner.ProfilingMode.ANY_PANIC)
        return;

    int match = 0;
    if (!z.isScalar()) {
        MatchCondition condition = new MatchCondition(z, Conditions.isNan());
        match = Nd4j.getExecutioner().exec(condition, Integer.MAX_VALUE).getInt(0);
    } else {
        if (z.data().dataType() == DataBuffer.Type.DOUBLE) {
            if (Double.isNaN(z.getDouble(0)))
                match = 1;
        } else {
            if (Float.isNaN(z.getFloat(0)))
                match = 1;
        }
    }

    if (match > 0)
        throw new ND4JIllegalStateException("P.A.N.I.C.! Op.Z() contains " + match + " NaN value(s)");
}
Code example source: deeplearning4j/nd4j
/**
 * Copies a vector into another vector.
 *
 * @param x the source vector
 * @param y the destination vector
 */
@Override
public void copy(INDArray x, INDArray y) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, x, y);

    if (x.isSparse() || y.isSparse()) {
        Nd4j.getSparseBlasWrapper().level1().copy(x, y);
        return;
    }
    if (x.data().dataType() == DataBuffer.Type.DOUBLE) {
        DefaultOpExecutioner.validateDataType(DataBuffer.Type.DOUBLE, x, y);
        dcopy(x.length(), x, BlasBufferUtil.getBlasStride(x), y, BlasBufferUtil.getBlasStride(y));
    } else {
        DefaultOpExecutioner.validateDataType(DataBuffer.Type.FLOAT, x, y);
        scopy(x.length(), x, BlasBufferUtil.getBlasStride(x), y, BlasBufferUtil.getBlasStride(y));
    }
}
Code example source: deeplearning4j/nd4j
/**
 * Cos function
 * @param in   the input array
 * @param copy if true, operate on a copy of {@code in}; if false, operate in place
 * @return the resulting array
 */
public static INDArray cos(INDArray in, boolean copy) {
    return Nd4j.getExecutioner().execAndReturn(new Cos((copy ? in.dup() : in)));
}
Code example source: deeplearning4j/nd4j
/**
 * Returns the product along a given dimension
 *
 * @param dimension the dimension to compute the product along
 * @return the product along the specified dimension
 */
@Override
public INDArray prod(int... dimension) {
    return Nd4j.getExecutioner().exec(new Prod(this), dimension);
}
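A hypothetical usage sketch of the reduction above (imports omitted):

INDArray m = Nd4j.create(new double[][] {{1, 2}, {3, 4}});
INDArray rowProducts = m.prod(1);   // product along dimension 1 -> [2.0, 12.0]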
Code example source: deeplearning4j/nd4j
@Override
public INDArray nextGaussian(char order, long[] shape) {
    INDArray array = Nd4j.createUninitialized(shape, order);
    GaussianDistribution op = new GaussianDistribution(array, 0.0, 1.0);
    Nd4j.getExecutioner().exec(op, this);
    return array;
}
Code example source: deeplearning4j/nd4j
@Override
public INDArray getActivation(INDArray in, boolean training) {
    Nd4j.getExecutioner().execAndReturn(new Swish(in));
    return in;
}
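The three getActivation snippets above appear to be IActivation implementations (e.g. ActivationSoftSign, ActivationRectifiedTanh, ActivationSwish; the class names are assumptions). A hypothetical usage sketch (imports omitted):

IActivation swish = new ActivationSwish();          // assumed class name
INDArray in = Nd4j.linspace(-2, 2, 5);
INDArray out = swish.getActivation(in.dup(), true); // dup() because getActivation modifies its argument in place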
Code example source: deeplearning4j/nd4j
/**
 * Scales a vector by a scalar (X = alpha * X).
 *
 * @param N     the number of elements
 * @param alpha the scalar multiplier
 * @param X     the vector to scale
 */
@Override
public void scal(long N, double alpha, INDArray X) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, X);

    if (X.isSparse()) {
        Nd4j.getSparseBlasWrapper().level1().scal(N, alpha, X);
    } else if (X.data().dataType() == DataBuffer.Type.DOUBLE)
        dscal(N, alpha, X, BlasBufferUtil.getBlasStride(X));
    else if (X.data().dataType() == DataBuffer.Type.FLOAT)
        sscal(N, (float) alpha, X, BlasBufferUtil.getBlasStride(X));
    else if (X.data().dataType() == DataBuffer.Type.HALF)
        Nd4j.getExecutioner().exec(new ScalarMultiplication(X, alpha));
}
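A hypothetical usage sketch, assuming the method above is the BLAS Level1 scal implementation reachable through Nd4j.getBlasWrapper() (imports omitted):

INDArray v = Nd4j.create(new double[] {1.0, 2.0, 3.0});
Nd4j.getBlasWrapper().level1().scal(v.length(), 2.0, v);   // scales v in place to [2.0, 4.0, 6.0]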