Usage of the org.nd4j.linalg.factory.Nd4j.getBlasWrapper() method, with code examples


This article collects a number of code examples for the Java method org.nd4j.linalg.factory.Nd4j.getBlasWrapper(), showing how it is used in practice. The examples were extracted from selected projects on GitHub, Stack Overflow, Maven and similar platforms, and should serve as useful references. Details of the Nd4j.getBlasWrapper() method:
Package path: org.nd4j.linalg.factory.Nd4j
Class name: Nd4j
Method name: getBlasWrapper

About Nd4j.getBlasWrapper

Nd4j.getBlasWrapper() returns the BLAS wrapper of the currently active Nd4j backend. As the examples below show, it exposes level-1 BLAS routines directly (axpy, scal, dot, iamax, copy), level-2 and level-3 routines through level2()/level3() (gemv, gemm), and LAPACK routines through lapack() (syev, geev, gesvd).
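
As a quick orientation before the collected examples, here is a minimal, self-contained sketch (the class name and array values are ours, chosen purely for illustration):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class BlasWrapperDemo {
    public static void main(String[] args) {
        // Two small vectors; the values are arbitrary illustration data
        INDArray x = Nd4j.create(new double[] {1, 2, 3});
        INDArray y = Nd4j.create(new double[] {4, 5, 6});
        // Level-1 BLAS dot product through the backend's BLAS wrapper
        double d = Nd4j.getBlasWrapper().dot(x, y);
        System.out.println(d); // expected: 32.0
    }
}

The fragments added later in this article assume these same imports.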

Code examples

Code example source: origin: deeplearning4j/dl4j-examples

public static void main(String[] args) {

    Nd4j.setDataType(DataBuffer.Type.DOUBLE);
    INDArray arr = Nd4j.create(300);
    int numTimes = 10_000_000;
    double total = 0;

    for (int i = 0; i < numTimes; i++) {
        long start = System.nanoTime();
        // axpy computes arr = 1 * arr + arr in place
        Nd4j.getBlasWrapper().axpy(Integer.valueOf(1), arr, arr);
        long after = System.nanoTime();
        long add = Math.abs(after - start);
        System.out.println("Took " + add);
        total += add;
    }
    System.out.println("Avg time " + (total / numTimes));
}
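
For context, axpy is the level-1 BLAS update y := a*x + y, so the benchmark loop above repeatedly doubles arr. A minimal sketch of the operation itself (values arbitrary):

INDArray x = Nd4j.create(new double[] {1, 1, 1});
INDArray y = Nd4j.create(new double[] {10, 20, 30});
Nd4j.getBlasWrapper().axpy(0.5, x, y); // y := 0.5 * x + y
System.out.println(y); // expected: [10.5, 20.5, 30.5]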

Code example source: origin: deeplearning4j/nd4j

@Override
public Map<Integer, Double> labelCounts() {
  Map<Integer, Double> ret = new HashMap<>();
  if (labels == null)
    return ret;
long nTensors = labels.tensorsAlongDimension(1);
  for (int i = 0; i < nTensors; i++) {
    INDArray row = labels.tensorAlongDimension(i, 1);
    INDArray javaRow = labels.javaTensorAlongDimension(i, 1);
    int maxIdx = Nd4j.getBlasWrapper().iamax(row);
    int maxIdxJava = Nd4j.getBlasWrapper().iamax(javaRow);
    if (maxIdx < 0)
      throw new IllegalStateException("Please check the iamax implementation for "
              + Nd4j.getBlasWrapper().getClass().getName());
    if (ret.get(maxIdx) == null)
      ret.put(maxIdx, 1.0);
    else
      ret.put(maxIdx, ret.get(maxIdx) + 1.0);
  }
  return ret;
}
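
labelCounts relies on iamax, the BLAS routine that returns the index of the entry with the largest absolute value; on a one-hot label row this doubles as argmax. A small sketch (values arbitrary):

INDArray row = Nd4j.create(new double[] {0.1, 0.7, 0.2});
int maxIdx = Nd4j.getBlasWrapper().iamax(row);
System.out.println(maxIdx); // expected: 1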

Code example source: origin: deeplearning4j/nd4j

@Override
public int outcome() {
  return Nd4j.getBlasWrapper().iamax(getLabels());
}

Code example source: origin: deeplearning4j/nd4j

/**
 * Scale by 1 / norm2 of the matrix
 *
 * @param toScale the ndarray to scale
 * @return the scaled ndarray
 */
public static INDArray unitVec(INDArray toScale) {
  double length = toScale.norm2Number().doubleValue();
  if (length > 0) {
    if (toScale.data().dataType() == (DataBuffer.Type.FLOAT))
      return Nd4j.getBlasWrapper().scal(1.0f / (float) length, toScale);
    else
      return Nd4j.getBlasWrapper().scal(1.0 / length, toScale);
  }
  return toScale;
}
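
This helper lives in org.nd4j.linalg.ops.transforms.Transforms; a quick check of its behavior (vector values arbitrary):

INDArray v = Nd4j.create(new double[] {3, 4});  // norm2 = 5
INDArray unit = Transforms.unitVec(v);          // scales by 1/5 via BLAS scal
System.out.println(unit.norm2Number());         // expected: 1.0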

Code example source: origin: deeplearning4j/nd4j

/**
 * Compute generalized eigenvalues of the problem A x = L x.
 * Matrix A is modified in the process, holding eigenvectors after execution.
 *
 * @param A symmetric Matrix A. After execution, A will contain the eigenvectors as columns
 * @return a vector of eigenvalues L.
 */
public static INDArray symmetricGeneralizedEigenvalues(INDArray A) {
  INDArray eigenvalues = Nd4j.create(A.rows());
  Nd4j.getBlasWrapper().syev('V', 'L', A, eigenvalues);
  return eigenvalues;
}
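
A usage sketch for this helper (it lives in org.nd4j.linalg.eigen.Eigen; the matrix is arbitrary but must be symmetric, and is overwritten with the eigenvectors):

// A symmetric 2x2 matrix; after the call A holds the eigenvectors as columns
INDArray A = Nd4j.create(new double[][] {{2, 1}, {1, 2}});
INDArray evals = Eigen.symmetricGeneralizedEigenvalues(A);
System.out.println(evals); // expected eigenvalues: 1 and 3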

Code example source: origin: deeplearning4j/nd4j

/** Matrix multiply: Implements c = alpha*op(a)*op(b) + beta*c where op(X) means transpose X (or not)
 * depending on setting of arguments transposeA and transposeB.<br>
 * Note that matrix c MUST be fortran order, have zero offset and have c.data().length == c.length().
 * An exception will be thrown otherwise.<br>
 * Don't use this unless you know about level 3 blas and NDArray storage orders.
 * @param a First matrix
 * @param b Second matrix
 * @param c result matrix. Used in calculation (assuming beta != 0) and result is stored in this. f order,
 *          zero offset and length == data.length only
 * @param transposeA if true: transpose matrix a before mmul
 * @param transposeB if true: transpose matrix b before mmul
 * @return result, i.e., matrix c is returned for convenience
 */
public static INDArray gemm(INDArray a,
              INDArray b,
              INDArray c,
              boolean transposeA,
              boolean transposeB,
              double alpha,
              double beta) {
  getBlasWrapper().level3().gemm(a, b, c, transposeA, transposeB, alpha, beta);
  return c;
}
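
A usage sketch; note that c is created with 'f' ordering, as the javadoc above requires (matrix values arbitrary):

INDArray a = Nd4j.create(new double[][] {{1, 2}, {3, 4}});
INDArray b = Nd4j.create(new double[][] {{5, 6}, {7, 8}});
// Result buffer must be Fortran-ordered with zero offset
INDArray c = Nd4j.create(2, 2, 'f');
Nd4j.gemm(a, b, c, false, false, 1.0, 0.0); // c = 1.0 * a*b + 0.0 * c
System.out.println(c); // expected: [[19, 22], [43, 50]]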

Code example source: origin: deeplearning4j/nd4j

/**
 * in place subtraction of two matrices
 *
 * @param other  the second ndarray to subtract
 * @param result the result ndarray
 * @return the result of the subtraction
 */
@Override
public IComplexNDArray subi(INDArray other, INDArray result) {
  IComplexNDArray cOther = (IComplexNDArray) other;
  IComplexNDArray cResult = (IComplexNDArray) result;
  if (other.isScalar())
    return subi(cOther.getComplex(0), result);
  if (result == this)
    Nd4j.getBlasWrapper().axpy(Nd4j.NEG_UNIT, cOther, cResult);
  else if (result == other) {
    if (data.dataType() == (DataBuffer.Type.DOUBLE)) {
      Nd4j.getBlasWrapper().scal(Nd4j.NEG_UNIT.asDouble(), cResult);
      Nd4j.getBlasWrapper().axpy(Nd4j.UNIT, this, cResult);
    } else {
      Nd4j.getBlasWrapper().scal(Nd4j.NEG_UNIT.asFloat(), cResult);
      Nd4j.getBlasWrapper().axpy(Nd4j.UNIT, this, cResult);
    }
  } else {
    Nd4j.getBlasWrapper().copy(this, result);
    Nd4j.getBlasWrapper().axpy(Nd4j.NEG_UNIT, cOther, cResult);
  }
  return cResult;
}

Code example source: origin: deeplearning4j/nd4j

/**
 * Compute generalized eigenvalues of the problem A x = L x.
 * Matrix A is modified in the process, holding eigenvectors as columns after execution.
 *
 * @param A symmetric Matrix A. After execution, A will contain the eigenvectors as columns
 * @param calculateVectors if true, A is overwritten with the eigenvectors as columns;
 *                         if false, the computation runs on a copy and A is left unchanged
 * @return a vector of eigenvalues L.
 */
public static INDArray symmetricGeneralizedEigenvalues(INDArray A, boolean calculateVectors) {
  INDArray eigenvalues = Nd4j.create(A.rows());
  Nd4j.getBlasWrapper().syev('V', 'L', (calculateVectors ? A : A.dup()), eigenvalues);
  return eigenvalues;
}

Code example source: origin: deeplearning4j/nd4j

@Override
public INDArray mmul(INDArray other) {
  long[] shape = {rows(), other.columns()};
  INDArray result = createUninitialized(shape, 'f');
  if (result.isScalar())
    return Nd4j.scalar(Nd4j.getBlasWrapper().dot(this, other));
  return mmuli(other, result);
}

Code example source: origin: deeplearning4j/nd4j

/**
 * Computes the eigenvalues of a general matrix.
 */
public static IComplexNDArray eigenvalues(INDArray A) {
  assert A.rows() == A.columns();
  INDArray WR = Nd4j.create(A.rows(), A.rows());
  INDArray WI = WR.dup();
  // 'N','N': eigenvectors are not computed; 'dummy' is a placeholder buffer field of the enclosing class
  Nd4j.getBlasWrapper().geev('N', 'N', A.dup(), WR, WI, dummy, dummy);
  return Nd4j.createComplex(WR, WI);
}

Code example source: origin: deeplearning4j/nd4j

/**
 * Perform a copy matrix multiplication
 *
 * @param other the other matrix to perform matrix multiply with
 * @return the result of the matrix multiplication
 */
@Override
public INDArray mmul(INDArray other) {
  // FIXME: for 1D case, we probably want vector output here?
  long[] shape = {rows(), other.rank() == 1 ? 1 : other.columns()};
  INDArray result = createUninitialized(shape, 'f');
  if (result.isScalar())
    return Nd4j.scalar(Nd4j.getBlasWrapper().dot(this, other));
  return mmuli(other, result);
}

Code example source: origin: deeplearning4j/nd4j

/**
 * Compute generalized eigenvalues of the problem A x = L B x.
 * The data will be unchanged, no eigenvectors returned.
 *
 * @param A symmetric Matrix A.
 * @param B symmetric Matrix B.
 * @return a vector of eigenvalues L.
 */
public static INDArray symmetricGeneralizedEigenvalues(INDArray A, INDArray B) {
  assert A.rows() == A.columns();
  assert B.rows() == B.columns();
  INDArray W = Nd4j.create(A.rows());
  // reduce A x = L B x to the standard problem (B^-1 A) x = L x
  A = InvertMatrix.invert(B, false).mmuli(A);
  Nd4j.getBlasWrapper().syev('V', 'L', A, W);
  return W;
}
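
A sketch of the two-matrix form (B must be invertible); with B the identity it degenerates to the standard problem, which makes the result easy to check:

INDArray A = Nd4j.create(new double[][] {{2, 0}, {0, 3}});
INDArray B = Nd4j.create(new double[][] {{1, 0}, {0, 1}}); // identity
INDArray evals = Eigen.symmetricGeneralizedEigenvalues(A, B);
System.out.println(evals); // expected: 2 and 3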

Code example source: origin: deeplearning4j/nd4j

/**
 * Returns a column vector where each entry is the nth bilinear
 * product of the nth slices of the two tensors.
 */
@Override
public INDArray bilinearProducts(INDArray curr, INDArray in) {
  assert curr.shape().length == 3;
  if (in.columns() != 1) {
    throw new AssertionError("Expected a column vector");
  }
  if (in.rows() != curr.size(curr.shape().length - 1)) {
    throw new AssertionError("Number of rows in the input does not match number of columns in tensor");
  }
  if (curr.size(curr.shape().length - 2) != curr.size(curr.shape().length - 1)) {
    throw new AssertionError("Can only perform this operation on a SimpleTensor with square slices");
  }
  INDArray ret = Nd4j.create(curr.slices(), 1);
  INDArray inT = in.transpose();
  for (int i = 0; i < curr.slices(); i++) {
    INDArray slice = curr.slice(i);
    INDArray inTTimesSlice = inT.mmul(slice);
    ret.putScalar(i, Nd4j.getBlasWrapper().dot(inTTimesSlice, in));
  }
  return ret;
}
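
The final contraction in the loop is just the level-1 dot from earlier; written out for a single slice (values arbitrary, imports as in the first sketch):

INDArray in = Nd4j.create(new double[] {1, 2}).reshape(2, 1);   // column vector
INDArray slice = Nd4j.create(new double[][] {{1, 0}, {0, 1}});  // one square tensor slice
INDArray inTTimesSlice = in.transpose().mmul(slice);            // 1 x 2
// dot(inT * slice, in) == in^T * slice * in; here 1*1 + 2*2 = 5
double bilinear = Nd4j.getBlasWrapper().dot(inTTimesSlice, in);
System.out.println(bilinear); // expected: 5.0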

Code example source: origin: deeplearning4j/nd4j

if (result == this) { // guard restored to match the subi example above: in-place add accumulates into this
  Nd4j.getBlasWrapper().axpy(Nd4j.UNIT, cOther, cResult);
} else if (result == other) {
  Nd4j.getBlasWrapper().axpy(Nd4j.UNIT, this, cResult);
} else {
  INDArray resultLinear = result.linearView();

Code example source: origin: deeplearning4j/nd4j

// assumed dispatch (structure lost in extraction): a column-vector operand goes through
// level-2 gemv, a matrix operand through level-3 gemm
if (otherArray.columns() == 1) {
  Nd4j.getBlasWrapper().level2().gemv(BlasBufferUtil.getCharForTranspose(temp),
          BlasBufferUtil.getCharForTranspose(this), Nd4j.UNIT, this, otherArray, Nd4j.ZERO, temp);
} else {
  Nd4j.getBlasWrapper().level3().gemm(BlasBufferUtil.getCharForTranspose(temp),
          BlasBufferUtil.getCharForTranspose(this), BlasBufferUtil.getCharForTranspose(other),
          Nd4j.UNIT, this, otherArray, Nd4j.ZERO, temp);
}
// the result aliases an operand, so accumulate into temp first, then copy over
Nd4j.getBlasWrapper().copy(temp, resultArray);

// when the result does not alias an operand, write into it directly
if (otherArray.columns() == 1) {
  Nd4j.getBlasWrapper().level2().gemv(BlasBufferUtil.getCharForTranspose(resultArray),
          BlasBufferUtil.getCharForTranspose(this), Nd4j.UNIT, this, otherArray, Nd4j.ZERO,
          resultArray);
} else {
  Nd4j.getBlasWrapper().level3().gemm(BlasBufferUtil.getCharForTranspose(resultArray),
          BlasBufferUtil.getCharForTranspose(this), BlasBufferUtil.getCharForTranspose(other),
          Nd4j.UNIT, this, otherArray, Nd4j.ZERO, resultArray);
}

Code example source: origin: deeplearning4j/nd4j

/**
 * Compute generalized eigenvalues of the problem A x = L B x.
 * The data will be unchanged, no eigenvectors returned unless calculateVectors is true.
 * If calculateVectors == true, A will contain a matrix with the eigenvectors as columns.
 *
 * @param A symmetric Matrix A.
 * @param B symmetric Matrix B.
 * @param calculateVectors if true, A will be overwritten with the eigenvectors as columns
 * @return a vector of eigenvalues L.
 */
public static INDArray symmetricGeneralizedEigenvalues(INDArray A, INDArray B, boolean calculateVectors) {
  assert A.rows() == A.columns();
  assert B.rows() == B.columns();
  INDArray W = Nd4j.create(A.rows());
  if (calculateVectors)
    A.assign(InvertMatrix.invert(B, false).mmuli(A));
  else
    A = InvertMatrix.invert(B, false).mmuli(A);
  Nd4j.getBlasWrapper().syev('V', 'L', A, W);
  return W;
}

Code example source: origin: deeplearning4j/nd4j

Nd4j.getBlasWrapper().lapack().gesvd(A, s, null, VT);
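
The single line above is taken out of a larger routine. A self-contained sketch of the same LAPACK call might look like this (shapes follow the sample() example later in this article: s of length min(m, n), square u and vt; gesvd generally overwrites its input, hence the dup):

INDArray A  = Nd4j.create(new double[][] {{3, 0}, {0, 2}});
INDArray s  = Nd4j.create(2);         // singular values, length min(m, n)
INDArray u  = Nd4j.create(2, 2);      // left singular vectors
INDArray vt = Nd4j.create(2, 2, 'f'); // right singular vectors (transposed)
Nd4j.getBlasWrapper().lapack().gesvd(A.dup(), s, u, vt);
System.out.println(s); // expected: [3.0, 2.0], in descending order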

Code example source: origin: deeplearning4j/nd4j

if (other.columns() == 1) { // assumed guard: column-vector operand uses level-2 gemv
  Nd4j.getBlasWrapper().level2().gemv(ordering(), BlasBufferUtil.getCharForTranspose(other), 1.0, this, other,
          0.0, gemmResultArr);
} else { // matrix operand uses level-3 gemm
  Nd4j.getBlasWrapper().level3().gemm(ordering(), BlasBufferUtil.getCharForTranspose(other),
          BlasBufferUtil.getCharForTranspose(gemmResultArr), 1.0, this, other, 0.0, gemmResultArr);
}

Code example source: origin: deeplearning4j/nd4j

@Override
public INDArray sample(int[] shape) {
  int numRows = 1;
  for (int i = 0; i < shape.length - 1; i++)
    numRows *= shape[i];
  int numCols = shape[shape.length - 1];
  val flatShape = new int[]{numRows, numCols};
  val flatRng =  Nd4j.getExecutioner().exec(new GaussianDistribution(Nd4j.createUninitialized(flatShape, Nd4j.order()), 0.0, 1.0), random);
  long m = flatRng.rows();
  long n = flatRng.columns();
  val s = Nd4j.create(m < n ? m : n);
  val u = m < n ? Nd4j.create(m, n) : Nd4j.create(m, m);
  val v = Nd4j.create(n, n, 'f');
  Nd4j.getBlasWrapper().lapack().gesvd(flatRng, s, u, v);
  // FIXME: int cast
  if (gains == null) {
    if (u.rows() == numRows && u.columns() == numCols) {
      return v.get(NDArrayIndex.interval(0, numRows), NDArrayIndex.interval(0, numCols)).mul(gain).reshape(ArrayUtil.toLongArray(shape));
    } else {
      return u.get(NDArrayIndex.interval(0, numRows), NDArrayIndex.interval(0, numCols)).mul(gain).reshape(ArrayUtil.toLongArray(shape));
    }
  } else {
    throw new UnsupportedOperationException();
  }
}

Code example source: origin: deeplearning4j/nd4j

INDArray VL = Nd4j.create(A.rows(), A.rows());
Nd4j.getBlasWrapper().geev('v', 'v', A.dup(), WR, WI, VL, VR);
