org.nd4j.linalg.factory.Nd4j.zeros()方法的使用及代码示例

x33g5p2x  于2022-01-24 转载在 其他  
字(9.7k)|赞(0)|评价(0)|浏览(199)

本文整理了Java中org.nd4j.linalg.factory.Nd4j.zeros()方法的一些代码示例,展示了Nd4j.zeros()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Nd4j.zeros()方法的具体详情如下:
包路径:org.nd4j.linalg.factory.Nd4j
类名称:Nd4j
方法名:zeros

Nd4j.zeros介绍

[英]Creates a row vector with the specified number of columns
[中]创建具有指定列数的行向量

代码示例

代码示例来源:origin: deeplearning4j/dl4j-examples

/**
 * Builds a dense INDArray from a matrix stored in Compressed Sparse Column (CSC) form.
 *
 * @param data           non-zero values, in column-major order
 * @param rowIndices     row index of each entry in {@code data}
 * @param columnPointers start offset into {@code data} for each column
 * @param shape          target dense shape, {rows, columns}
 * @return a dense matrix with the CSC entries filled in
 */
private static INDArray createFromCSC(double[] data, int[] rowIndices, int[] columnPointers, int[] shape){
    INDArray dense = Nd4j.zeros(shape);
    int numColumns = shape[1];
    int cursor = 0;
    for (int col = 0; col < numColumns; col++) {
      // The last column's run of stored entries extends to the end of rowIndices;
      // every other column stops at the next column's start pointer.
      int end = (col == columnPointers.length - 1) ? rowIndices.length : columnPointers[col + 1];
      for (int k = cursor; k < end; k++, cursor++) {
        dense.put(rowIndices[k], col, data[k]);
      }
    }
    return dense;
  }
}

代码示例来源:origin: deeplearning4j/dl4j-examples

@Override
  public DataSet call(String s) throws Exception {
    // Map a string to a one-hot time series: one feature/label step per
    // character transition, using the broadcast char -> index mapping.
    Map<Character, Integer> charToIdx = ctiBroadcast.getValue();
    int seqLength = s.length();
    // Shape: [miniBatchSize=1, numClasses=N_CHARS, timeSeriesLength]
    INDArray features = Nd4j.zeros(1, N_CHARS, seqLength - 1);
    INDArray labels = Nd4j.zeros(1, N_CHARS, seqLength - 1);
    char[] chars = s.toCharArray();
    int[] fIdx = new int[3];
    int[] lIdx = new int[3];
    for (int t = 0; t < chars.length - 2; t++) {
      fIdx[1] = charToIdx.get(chars[t]);
      fIdx[2] = t;
      lIdx[1] = charToIdx.get(chars[t + 1]);   // label: the next character in the sequence
      lIdx[2] = t;
      features.putScalar(fIdx, 1.0);
      labels.putScalar(lIdx, 1.0);
    }
    return new DataSet(features, labels);
  }
}

代码示例来源:origin: deeplearning4j/dl4j-examples

INDArray zeros = Nd4j.zeros(nRows, nColumns);

代码示例来源:origin: deeplearning4j/dl4j-examples

/**
 * Reads up to {@code num} images from HDFS via {@code fileIterator} and converts
 * them into a single merged DataSet. The label of each image is derived from
 * the name of its parent directory and one-hot encoded against {@code labels}.
 *
 * @param num maximum number of images to read in this batch
 * @return the merged DataSet, or an empty DataSet if no files were read
 * @throws RuntimeException wrapping any I/O or conversion failure
 */
public DataSet convertDataSet(int num)  {
  int batchNumCount = 0;
  List<DataSet> dataSets = new ArrayList<>();   // diamond instead of raw type
  FileSystem fs = CommonUtils.openHdfsConnect();
  try {
    while (batchNumCount != num && fileIterator.hasNext()) {
      ++batchNumCount;
      String fullPath = fileIterator.next();
      // Label = name of the image's parent directory.
      Writable labelText = new Text(FilenameUtils.getBaseName((new File(fullPath)).getParent()));
      INDArray label = Nd4j.zeros(1, labels.size()).putScalar(new int[]{0, labels.indexOf(labelText)}, 1);
      INDArray features;
      // try-with-resources: the original leaked the stream if asMatrix threw.
      try (InputStream imageios = fs.open(new Path(fullPath))) {
        features = asMatrix(imageios);
      }
      Nd4j.getAffinityManager().tagLocation(features, AffinityManager.Location.HOST);
      dataSets.add(new DataSet(features, label));
    }
  } catch (Exception e) {
    // Wrap the exception itself: e.getCause() may be null and always
    // discards the immediate failure context.
    throw new RuntimeException(e);
  } finally {
    CommonUtils.closeHdfsConnect(fs);
  }
  if (dataSets.isEmpty()) {
    return new DataSet();
  }
  return DataSet.merge(dataSets);
}

代码示例来源:origin: deeplearning4j/dl4j-examples

INDArray initializationInput = Nd4j.zeros(numSamples, intToChar.size(), initialization.length());
char[] init = initialization.toCharArray();
for (int i = 0; i < init.length; i++) {
  INDArray nextInput = Nd4j.zeros(numSamples, intToChar.size());

代码示例来源:origin: deeplearning4j/dl4j-examples

INDArray featuresMask = Nd4j.zeros(reviews.size(), maxLength);
INDArray labelsMask = Nd4j.zeros(reviews.size(), maxLength);

代码示例来源:origin: deeplearning4j/dl4j-examples

INDArray myArray = Nd4j.zeros(nRows, nColumns);

代码示例来源:origin: deeplearning4j/dl4j-examples

INDArray zerosColumn = Nd4j.zeros(3,1);
originalArray.put(new INDArrayIndex[]{NDArrayIndex.all(), NDArrayIndex.point(2)}, zerosColumn);     //All rows, column index 2
System.out.println("\n\n\nOriginal array, after put operation:\n" + originalArray);

代码示例来源:origin: deeplearning4j/dl4j-examples

INDArray initializationInput = Nd4j.zeros(numSamples, iter.inputColumns(), initialization.length());
char[] init = initialization.toCharArray();
for( int i=0; i<init.length; i++ ){
  INDArray nextInput = Nd4j.zeros(numSamples,iter.inputColumns());

代码示例来源:origin: deeplearning4j/nd4j

@Override
public INDArray toDense() {
  // Naive conversion: allocate a dense zero array, then copy every stored
  // entry of the compressed-row representation into it, one row at a time.
  INDArray dense = Nd4j.zeros(shape());
  int[] rowStarts = pointerB.asInt();
  int[] rowEnds = pointerE.asInt();
  for (int r = 0; r < rows(); r++) {
    for (int pos = rowStarts[r]; pos < rowEnds[r]; pos++) {
      dense.put(r, columnsPointers.getInt(pos), values.getNumber(pos));
    }
  }
  return dense;
}

代码示例来源:origin: guoguibing/librec

@Override
protected void setup() throws LibrecException {
  super.setup();
  // Item-based autoencoder: each training example is one item's vector of
  // user ratings, so the input dimension is the number of users.
  inputDim = numUsers;
  hiddenDim = conf.getInt("rec.hidden.dimension");
  learningRate = conf.getDouble("rec.iterator.learnrate");
  lambdaReg = conf.getDouble("rec.weight.regularization");
  numIterations = conf.getInt("rec.iterator.maximum");
  hiddenActivation = conf.get("rec.hidden.activation");
  outputActivation = conf.get("rec.output.activation");
  // transform the sparse matrix to INDArray
  // Note the transposition: the dense matrix is items x users, so each
  // entry is written at (column, row) of the original sparse matrix.
  int[] matrixShape = {numItems, numUsers};
  trainSet = Nd4j.zeros(matrixShape);
  // Mask marks which (item, user) cells hold observed ratings.
  trainSetMask = Nd4j.zeros(matrixShape);
  for (MatrixEntry me: trainMatrix) {
    trainSet.put(me.column(), me.row(), me.get());
    trainSetMask.put(me.column(), me.row(), 1);
  }
}

代码示例来源:origin: deeplearning4j/nd4j

private INDArray labelsMinusMu(INDArray labels, INDArray mu) {
  // Broadcast each label across the mixture dimension, then subtract mu,
  // as a step towards the negative log likelihood of the labels.
  // An index-manipulation version (reshape/repeat/permute/subi) computed
  // the same result but benchmarked slower than this plain loop.
  long nSamples = labels.size(0);
  long labelsPerSample = labels.size(1);
  // labels: [samples, labels]; mu: [samples, mixtures]; result: [samples, mixtures, labels]
  INDArray diff = Nd4j.zeros(nSamples, mMixtures, labelsPerSample);
  for (int mix = 0; mix < mMixtures; mix++) {
    diff.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.point(mix), NDArrayIndex.all()},
            labels);
  }
  // subi mutates diff in place and returns it.
  return diff.subi(mu);
}

代码示例来源:origin: deeplearning4j/nd4j

/**
 * Converts this sparse ndarray into an equivalent dense ndarray by writing
 * each stored value at its underlying multi-dimensional index.
 *
 * @return a dense ndarray with the same shape and contents
 * @throws UnsupportedOperationException for data types other than FLOAT/DOUBLE
 */
@Override
public INDArray toDense() {
  // TODO support view conversion
  INDArray dense = Nd4j.zeros(shape());
  // Branch once on the element type so each loop reads values without boxing.
  switch (data().dataType()) {
    case DOUBLE:
      for (int i = 0; i < length; i++) {
        dense.putScalar(getUnderlyingIndicesOf(i).asInt(), values.getDouble(i));
      }
      break;
    case FLOAT:
      for (int i = 0; i < length; i++) {
        dense.putScalar(getUnderlyingIndicesOf(i).asInt(), values.getFloat(i));
      }
      break;
    default:
      throw new UnsupportedOperationException();
  }
  return dense;
}

代码示例来源:origin: guoguibing/librec

@Override
protected void setup() throws LibrecException {
  super.setup();
  // User-based autoencoder: each training example is one user's vector of
  // item interactions, so the input dimension is the number of items.
  inputDim = numItems;
  hiddenDim = conf.getInt("rec.hidden.dimension");
  learningRate = conf.getDouble("rec.iterator.learnrate");
  lambdaReg = conf.getDouble("rec.weight.regularization");
  numIterations = conf.getInt("rec.iterator.maximum");
  hiddenActivation = conf.get("rec.hidden.activation");
  outputActivation = conf.get("rec.output.activation");
  // transform the sparse matrix to INDArray
  // the sparse training matrix has been binarized
  int[] matrixShape = {numUsers, numItems};
  trainSet = Nd4j.zeros(matrixShape);
  for (MatrixEntry me: trainMatrix) {
    trainSet.put(me.row(), me.column(), me.get());
  }
}

代码示例来源:origin: deeplearning4j/nd4j

public static boolean checkMulManually(INDArray first, INDArray second, double maxRelativeDifference,
        double minAbsDifference) {
  // Apache Commons has no element-wise multiply, so build the expected
  // result entry by entry and compare it against INDArray.mul.
  INDArray actual = first.mul(second);
  long[] dims = first.shape();
  INDArray expected = Nd4j.zeros(first.shape());
  for (int r = 0; r < dims[0]; r++) {
    for (int c = 0; c < dims[1]; c++) {
      expected.putScalar(new int[] {r, c}, first.getDouble(r, c) * second.getDouble(r, c));
    }
  }
  if (!checkShape(expected, actual))
    return false;
  boolean entriesOk = checkEntries(expected, actual, maxRelativeDifference, minAbsDifference);
  if (!entriesOk) {
    // Recompute on offset-zero copies to help diagnose view/stride issues.
    INDArray onCopies = Shape.toOffsetZeroCopy(first).mul(Shape.toOffsetZeroCopy(second));
    printFailureDetails(first, second, expected, actual, onCopies, "mul");
  }
  return entriesOk;
}

代码示例来源:origin: deeplearning4j/nd4j

/**
 * Verifies INDArray element-wise division against a manually computed
 * expected result (Apache Commons has no element-wise divide).
 *
 * @param first                 numerator matrix
 * @param second                denominator matrix
 * @param maxRelativeDifference maximum allowed relative difference per entry
 * @param minAbsDifference      absolute difference below which entries always match
 * @return true if shapes and all entries match within tolerance
 */
public static boolean checkDivManually(INDArray first, INDArray second, double maxRelativeDifference,
        double minAbsDifference) {
  //No apache commons element-wise division, but can do this manually
  INDArray result = first.div(second);
  long[] shape = first.shape();
  INDArray expected = Nd4j.zeros(first.shape());
  for (int i = 0; i < shape[0]; i++) {
    for (int j = 0; j < shape[1]; j++) {
      double v = first.getDouble(i, j) / second.getDouble(i, j);
      expected.putScalar(new int[] {i, j}, v);
    }
  }
  if (!checkShape(expected, result))
    return false;
  boolean ok = checkEntries(expected, result, maxRelativeDifference, minAbsDifference);
  if (!ok) {
    // BUG FIX: this diagnostic previously used .mul(...) (copy-paste from
    // checkMulManually); a division check must recompute with .div(...).
    INDArray onCopies = Shape.toOffsetZeroCopy(first).div(Shape.toOffsetZeroCopy(second));
    printFailureDetails(first, second, expected, result, onCopies, "div");
  }
  return ok;
}

代码示例来源:origin: deeplearning4j/nd4j

// Applies an AdaGrad-style per-element adaptive learning rate to the given
// gradient slice, in place, and accumulates the squared gradient history.
// Returns the same (mutated) gradient instance.
public INDArray getGradient(INDArray gradient, int slice, int[] shape) {
  boolean historicalInitialized = false;
  INDArray sqrtHistory;
  if (this.historicalGradient == null) {
    // Lazy init: seed the history with epsilon so the first rdivi below
    // never divides by zero.
    this.historicalGradient = Nd4j.zeros(shape).add(epsilon);
    historicalInitialized = true;
  } else if (!this.historicalGradient.isVector()
          && this.historicalGradient.slice(slice).length() != gradient.length())
    throw new IllegalArgumentException("Illegal gradient");
  if (historicalGradient.isVector())
    sqrtHistory = sqrt(historicalGradient);
  else
    // NOTE(review): on the first call (historicalInitialized == true) this
    // uses the raw history without sqrt — presumably acceptable because it
    // is uniformly epsilon then, but confirm this asymmetry is intentional.
    sqrtHistory = !historicalInitialized ? sqrt(historicalGradient.slice(slice)) : historicalGradient;
  INDArray learningRates;
  try {
    // learningRates = learningRate / sqrt(history), element-wise (in place).
    learningRates = sqrtHistory.rdivi(learningRate);
  } catch (ArithmeticException ae) {
    learningRates = sqrtHistory.rdivi(learningRate + epsilon);
  }
  // Scale the gradient; fall back to the matching slice when the full
  // learning-rate array does not line up with this gradient's length.
  if (gradient.length() != learningRates.length())
    gradient.muli(learningRates.slice(slice));
  else
    gradient.muli(learningRates);
  // Accumulate squared gradients into the history for future calls.
  this.historicalGradient.slice(slice).addi(gradient.mul(gradient));
  numIterations++;
  //ensure no zeros
  return gradient;
}

代码示例来源:origin: deeplearning4j/nd4j

INDArray bernoullis = Nd4j.zeros(labelMask.shape());
long currentTimeSliceEnd = label.size(2);

代码示例来源:origin: deeplearning4j/nd4j

std = (batchCount == 1) ? Nd4j.zeros(mean.shape()) : Transforms.pow(next.getFeatureMatrix().std(0), 2);
  std.muli(batchCount);
} else {

代码示例来源:origin: deeplearning4j/nd4j

INDArray gradient = Nd4j.zeros(nSamples, preOutput.columns());

相关文章