This article collects code examples of the Java method org.nd4j.linalg.factory.Nd4j.sizeOfDataType(), showing how Nd4j.sizeOfDataType() is used in practice. The examples are drawn from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the method:
Package: org.nd4j.linalg.factory
Class: Nd4j
Method: sizeOfDataType
Description: This method returns sizeOf(currentDataType), in bytes.
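Before the examples, here is a minimal, self-contained usage sketch (the class name and array shape are made up for illustration): the per-element size returned by Nd4j.sizeOfDataType() is multiplied by an array's element count to estimate its raw memory footprint, which is exactly the arithmetic the getMemoryFootprint() examples below rely on.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class SizeOfDataTypeExample {
    public static void main(String[] args) {
        // Bytes per element under the current global data type
        // (e.g. 4 for FLOAT, 8 for DOUBLE)
        int bytesPerElement = Nd4j.sizeOfDataType();
        System.out.println("bytes per element = " + bytesPerElement);

        // Estimate the raw data footprint of an array:
        // element count * bytes per element
        INDArray arr = Nd4j.create(100, 200);
        long footprintBytes = arr.lengthLong() * Nd4j.sizeOfDataType();
        System.out.println("approx. footprint of a 100x200 array = " + footprintBytes + " bytes");
    }
}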
Code example from: deeplearning4j/nd4j
/**
 * This method returns amount of shared memory required for this specific Aggregate.
 * PLEASE NOTE: this method is especially important for CUDA backend. On CPU backend it might be ignored, depending on Aggregate.
 *
 * @return
 */
@Override
public int getSharedMemorySize() {
    return (vectorLength * Nd4j.sizeOfDataType()) + 512;
}
Code example from: deeplearning4j/nd4j
@Override
public int getSharedMemorySize() {
    return (vectorLength * Nd4j.sizeOfDataType() * 2) + 512;
}
Code example from: deeplearning4j/nd4j
/**
 * This method returns amount of shared memory required for this specific Aggregate.
 * PLEASE NOTE: this method is especially important for CUDA backend. On CPU backend it might be ignored, depending on Aggregate.
 *
 * @return
 */
@Override
public int getSharedMemorySize() {
    return (getThreadsPerInstance() * Nd4j.sizeOfDataType()) + 512;
}
Code example from: deeplearning4j/nd4j
/**
 * This method returns amount of shared memory required for this specific Aggregate.
 * PLEASE NOTE: this method is especially important for CUDA backend. On CPU backend it might be ignored, depending on Aggregate.
 *
 * @return
 */
@Override
public int getSharedMemorySize() {
    return (getThreadsPerInstance() * Nd4j.sizeOfDataType()) + 256;
}
Code example from: deeplearning4j/nd4j
/**
 * This method returns amount of shared memory required for this specific Aggregate.
 * PLEASE NOTE: this method is especially important for CUDA backend. On CPU backend it might be ignored, depending on Aggregate.
 *
 * @return
 */
@Override
public int getSharedMemorySize() {
    return (getThreadsPerInstance() * Nd4j.sizeOfDataType()) + 512;
}
Code example from: deeplearning4j/nd4j
/**
 * This method returns sizeOf(currentDataType), in bytes
 *
 * @return number of bytes per element
 */
public static int sizeOfDataType() {
    return sizeOfDataType(Nd4j.dataType());
}
Code example from: deeplearning4j/nd4j
/**
 * This method returns memory used by this DataSet
 *
 * @return
 */
@Override
public long getMemoryFootprint() {
    long reqMem = features.lengthLong() * Nd4j.sizeOfDataType();
    reqMem += labels == null ? 0 : labels.lengthLong() * Nd4j.sizeOfDataType();
    reqMem += featuresMask == null ? 0 : featuresMask.lengthLong() * Nd4j.sizeOfDataType();
    reqMem += labelsMask == null ? 0 : labelsMask.lengthLong() * Nd4j.sizeOfDataType();
    return reqMem;
}
Code example from: deeplearning4j/nd4j
/**
 * This method returns memory used by this DataSet
 *
 * @return
 */
@Override
public long getMemoryFootprint() {
    long reqMem = 0;
    for (INDArray f : features)
        reqMem += f == null ? 0 : f.lengthLong() * Nd4j.sizeOfDataType();
    if (featuresMaskArrays != null)
        for (INDArray f : featuresMaskArrays)
            reqMem += f == null ? 0 : f.lengthLong() * Nd4j.sizeOfDataType();
    if (labelsMaskArrays != null)
        for (INDArray f : labelsMaskArrays)
            reqMem += f == null ? 0 : f.lengthLong() * Nd4j.sizeOfDataType();
    if (labels != null)
        for (INDArray f : labels)
            reqMem += f == null ? 0 : f.lengthLong() * Nd4j.sizeOfDataType();
    return reqMem;
}
Code example from: deeplearning4j/nd4j
requiredMemory += div;
long numElements = requiredMemory / Nd4j.sizeOfDataType(type);
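The fragment above is excerpted from the middle of a memory-sizing routine, so requiredMemory, div, and type come from surrounding code that is not shown. A minimal self-contained sketch of the same division (the 16 MB budget is a made-up value for illustration) converts a byte count into an element count, passing the current global data type to the typed overload of sizeOfDataType(), just as the method's own source does above.

import org.nd4j.linalg.factory.Nd4j;

public class ElementsForBudgetExample {
    public static void main(String[] args) {
        // Hypothetical byte budget, for illustration only
        long requiredMemory = 16L * 1024 * 1024; // 16 MB

        // How many elements of the current data type fit in that budget:
        // the same division as in the fragment above
        long numElements = requiredMemory / Nd4j.sizeOfDataType(Nd4j.dataType());

        System.out.println(numElements + " elements fit in " + requiredMemory + " bytes");
    }
}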
Code example from: deeplearning4j/nd4j
protected static DataBuffer internalCreateBuffer(double[] data) {
    val perfX = PerformanceTracker.getInstance().helperStartTransaction();
    val buffer = Nd4j.createBuffer(data);
    PerformanceTracker.getInstance().helperRegisterTransaction(0, perfX, data.length * Nd4j.sizeOfDataType(), MemcpyDirection.HOST_TO_HOST);
    return buffer;
}
Code example from: deeplearning4j/nd4j
protected static DataBuffer internalCreateBuffer(float[] data, long offset) {
    val perfX = PerformanceTracker.getInstance().helperStartTransaction();
    val buffer = Nd4j.createBuffer(data, offset);
    PerformanceTracker.getInstance().helperRegisterTransaction(0, perfX, data.length * Nd4j.sizeOfDataType(), MemcpyDirection.HOST_TO_HOST);
    return buffer;
}
Code example from: deeplearning4j/nd4j
protected static DataBuffer internalCreateBuffer(double[] data, long offset) {
    val perfX = PerformanceTracker.getInstance().helperStartTransaction();
    val buffer = Nd4j.createBuffer(data, offset);
    PerformanceTracker.getInstance().helperRegisterTransaction(0, perfX, data.length * Nd4j.sizeOfDataType(), MemcpyDirection.HOST_TO_HOST);
    return buffer;
}
Code example from: deeplearning4j/nd4j
protected static DataBuffer internalCreateBuffer(int[] data, long offset) {
    val perfX = PerformanceTracker.getInstance().helperStartTransaction();
    val buffer = Nd4j.createBuffer(data, offset);
    PerformanceTracker.getInstance().helperRegisterTransaction(0, perfX, data.length * Nd4j.sizeOfDataType(), MemcpyDirection.HOST_TO_HOST);
    return buffer;
}
Code example from: deeplearning4j/nd4j
protected static DataBuffer internalCreateBuffer(float[] data) {
    val perfX = PerformanceTracker.getInstance().helperStartTransaction();
    val buffer = Nd4j.createBuffer(data);
    PerformanceTracker.getInstance().helperRegisterTransaction(0, perfX, data.length * Nd4j.sizeOfDataType(), MemcpyDirection.HOST_TO_HOST);
    return buffer;
}
Code example from: deeplearning4j/nd4j
protected static DataBuffer internalCreateBuffer(int[] data) {
    val perfX = PerformanceTracker.getInstance().helperStartTransaction();
    val buffer = Nd4j.createBuffer(data);
    PerformanceTracker.getInstance().helperRegisterTransaction(0, perfX, data.length * Nd4j.sizeOfDataType(), MemcpyDirection.HOST_TO_HOST);
    return buffer;
}
Code example from: deeplearning4j/nd4j
/**
 *
 * @param data
 * @param shape
 * @param stride
 * @param offset
 * @param ordering
 */
public BaseNDArray(float[] data, int[] shape, int[] stride, long offset, char ordering) {
    setShapeInformation(Nd4j.getShapeInfoProvider().createShapeInformation(shape, stride, offset,
            Shape.elementWiseStride(shape, stride, ordering == 'f'), ordering));
    if (data != null && data.length > 0) {
        val perfD = PerformanceTracker.getInstance().helperStartTransaction();
        this.data = internalCreateBuffer(data, offset);
        PerformanceTracker.getInstance().helperRegisterTransaction(0, perfD, data.length * Nd4j.sizeOfDataType(), MemcpyDirection.HOST_TO_HOST);
        if (offset >= data.length)
            throw new IllegalArgumentException("invalid offset: must be < data.length");
    }
    init(shape, stride);
}
Code example from: org.nd4j/nd4j-api
@Override
public int getSharedMemorySize() {
    return (vectorLength * Nd4j.sizeOfDataType() * 2) + 512;
}
Code example from: org.nd4j/nd4j-api
/**
 * This method returns amount of shared memory required for this specific Aggregate.
 * PLEASE NOTE: this method is especially important for CUDA backend. On CPU backend it might be ignored, depending on Aggregate.
 *
 * @return
 */
@Override
public int getSharedMemorySize() {
    return (getThreadsPerInstance() * Nd4j.sizeOfDataType()) + 512;
}
Code example from: org.nd4j/nd4j-api
/**
 * This method returns sizeOf(currentDataType), in bytes
 *
 * @return number of bytes per element
 */
public static int sizeOfDataType() {
    return sizeOfDataType(Nd4j.dataType());
}
Code example from: org.nd4j/nd4j-api
/**
 * This method returns amount of shared memory required for this specific Aggregate.
 * PLEASE NOTE: this method is especially important for CUDA backend. On CPU backend it might be ignored, depending on Aggregate.
 *
 * @return
 */
@Override
public int getSharedMemorySize() {
    return (getThreadsPerInstance() * Nd4j.sizeOfDataType()) + 256;
}