本文整理了Java中org.nd4j.linalg.factory.Nd4j.getAffinityManager()
方法的一些代码示例,展示了Nd4j.getAffinityManager()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Nd4j.getAffinityManager()
方法的具体详情如下:
包路径:org.nd4j.linalg.factory.Nd4j
类名称:Nd4j
方法名:getAffinityManager
暂无
代码示例来源:origin: deeplearning4j/nd4j
public DeviceLocal() {
int numDevices = Nd4j.getAffinityManager().getNumberOfDevices();
for (int i = 0; i < numDevices; i++) {
locksMap.add(new ReentrantReadWriteLock());
}
}
代码示例来源:origin: deeplearning4j/nd4j
/**
 * Removes the object stored for the device bound to the calling thread.
 * Other devices' entries are left untouched.
 */
public void clear() {
    int deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    // Acquire the lock BEFORE entering try: if lock() itself failed inside
    // the try block, the finally clause would call unlock() on a lock that
    // was never held, throwing IllegalMonitorStateException and masking
    // the original failure.
    locksMap.get(deviceId).writeLock().lock();
    try {
        backingMap.remove(deviceId);
    } finally {
        locksMap.get(deviceId).writeLock().unlock();
    }
}
}
代码示例来源:origin: deeplearning4j/nd4j
private PerformanceTracker() {
    // Seed one averaging holder per device for both bandwidth and
    // operation statistics, so later lookups never hit a missing key.
    val deviceCount = Nd4j.getAffinityManager().getNumberOfDevices();
    for (int device = 0; device < deviceCount; device++) {
        bandwidth.put(device, new AveragingTransactionsHolder());
        operations.put(device, new AveragingTransactionsHolder());
    }
}
代码示例来源:origin: deeplearning4j/nd4j
/**
 * Stores the given object for the device bound to the calling thread.
 *
 * @param object the value to associate with the current device
 */
public void set(T object) {
    int deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    set(deviceId, object);
}
代码示例来源:origin: deeplearning4j/nd4j
/**
 * Duplicates the given array and stores a copy on every known device.
 * The device owned by the calling thread receives the array itself,
 * without a replication round-trip.
 *
 * @param array the array to distribute; a null argument is a no-op
 */
public void broadcast(INDArray array) {
    if (array == null)
        return;

    // Flush any pending operations on the array before replicating it.
    Nd4j.getExecutioner().commit();

    final int currentDevice = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    final int deviceCount = Nd4j.getAffinityManager().getNumberOfDevices();
    for (int device = 0; device < deviceCount; device++) {
        INDArray replica = (device == currentDevice)
                ? array // current thread's device: store as-is, no duplication
                : Nd4j.getAffinityManager().replicateToDevice(device, array);
        set(device, replica);
    }
}
}
代码示例来源:origin: deeplearning4j/nd4j
/**
 * Returns the object stored for the device bound to the calling thread.
 *
 * @return the device-local value, possibly null if nothing was stored
 */
@Nullable
public T get() {
    int deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    return get(deviceId);
}
代码示例来源:origin: deeplearning4j/dl4j-examples
/**
 * Reads up to {@code num} images from HDFS and converts them into a single
 * merged DataSet. Each image's label is derived from the name of its parent
 * directory.
 *
 * @param num maximum number of examples to read in this batch
 * @return the merged DataSet, or an empty DataSet when nothing was read
 */
public DataSet convertDataSet(int num) {
    int batchNumCount = 0;
    List<DataSet> dataSets = new ArrayList<>();
    FileSystem fs = CommonUtils.openHdfsConnect();
    try {
        while (batchNumCount != num && fileIterator.hasNext()) {
            ++batchNumCount;
            String fullPath = fileIterator.next();
            // The parent-directory name is the class label of this image.
            Writable labelText = new Text(FilenameUtils.getBaseName((new File(fullPath)).getParent()));
            // One-hot label row over all known labels.
            INDArray label = Nd4j.zeros(1, labels.size()).putScalar(new int[] {0, labels.indexOf(labelText)}, 1);
            INDArray features;
            // try-with-resources: the stream is closed even if asMatrix throws
            // (the original leaked the stream on failure).
            try (InputStream imageios = fs.open(new Path(fullPath))) {
                features = asMatrix(imageios);
            }
            // NOTE(review): presumably marks the freshly decoded array as
            // resident in host memory — confirm against AffinityManager docs.
            Nd4j.getAffinityManager().tagLocation(features, AffinityManager.Location.HOST);
            dataSets.add(new DataSet(features, label));
        }
    } catch (Exception e) {
        // Wrap the exception itself, not e.getCause(): the cause may be null
        // and wrapping it would drop the immediate stack trace.
        throw new RuntimeException(e);
    } finally {
        CommonUtils.closeHdfsConnect(fs);
    }
    return dataSets.isEmpty() ? new DataSet() : DataSet.merge(dataSets);
}
代码示例来源:origin: deeplearning4j/nd4j
if (isDebug.get())
log.info("Workspace [{}] device_{}, current cycle: {}; max cycle: {}", id,
Nd4j.getAffinityManager().getDeviceForCurrentThread(), cycleAllocations.get(),
maxCycle.get());
代码示例来源:origin: deeplearning4j/nd4j
/**
 * Serializes an uncompressed INDArray into the given byte buffer.
 * The layout written is:
 * 4 bytes int for rank
 * 4 bytes int for the data opType ordinal (self-describing)
 * shape buffer
 * data buffer
 *
 * The statement order below matters: host memory must be synchronized
 * before the array's buffers are read.
 *
 * @param arr the array to serialize
 * @param allocated the byte buffer to write into (must be large enough)
 * @param rewind whether to rewind the byte buffer after writing or not
 */
public static void doByteBufferPutUnCompressed(INDArray arr, ByteBuffer allocated, boolean rewind) {
    // ensure we send data to host memory
    Nd4j.getExecutioner().commit();
    Nd4j.getAffinityManager().ensureLocation(arr, AffinityManager.Location.HOST);
    // View the array's data and shape-info buffers in native byte order.
    ByteBuffer buffer = arr.data().pointer().asByteBuffer().order(ByteOrder.nativeOrder());
    ByteBuffer shapeBuffer = arr.shapeInfoDataBuffer().pointer().asByteBuffer().order(ByteOrder.nativeOrder());
    // 2 four byte ints at the beginning
    allocated.putInt(arr.rank());
    // put data opType next so its self describing
    allocated.putInt(arr.data().dataType().ordinal());
    allocated.put(shapeBuffer);
    allocated.put(buffer);
    if (rewind)
        allocated.rewind();
}
代码示例来源:origin: deeplearning4j/nd4j
this.guid = Nd4j.getWorkspaceManager().getUUID();
this.memoryManager = Nd4j.getMemoryManager();
this.deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
代码示例来源:origin: deeplearning4j/dl4j-examples
.workers(Nd4j.getAffinityManager().getNumberOfDevices())
代码示例来源:origin: deeplearning4j/dl4j-examples
.prefetchBuffer(16 * Nd4j.getAffinityManager().getNumberOfDevices())
.reportScoreAfterAveraging(true)
.averagingFrequency(10)
.workers(Nd4j.getAffinityManager().getNumberOfDevices())
.build();
代码示例来源:origin: deeplearning4j/dl4j-examples
.workers(Nd4j.getAffinityManager().getNumberOfDevices())
代码示例来源:origin: org.deeplearning4j/deeplearning4j-parallel-wrapper_2.11
@Override
public Thread newThread(@NotNull Runnable r) {
    // Delegate actual thread creation to the default factory, then
    // customize naming, daemon status and exception handling.
    Thread worker = Executors.defaultThreadFactory().newThread(r);
    int workerId = workerCounter.getAndIncrement();
    worker.setName("ParallelWrapper training thread " + workerId);
    worker.setDaemon(true);
    worker.setUncaughtExceptionHandler(handler);
    // Pin the worker to a device, round-robin across available devices.
    int device = workerId % Nd4j.getAffinityManager().getNumberOfDevices();
    Nd4j.getAffinityManager().attachThreadToDevice(worker, device);
    return worker;
}
});
代码示例来源:origin: org.nd4j/nd4j-cuda-7.5
public void init() {
    // Allocate a zeroed cache counter for every device up front.
    deviceCachedAmount = new ArrayList<>();
    final int deviceCount = Nd4j.getAffinityManager().getNumberOfDevices();
    for (int device = 0; device < deviceCount; device++) {
        deviceCachedAmount.add(new AtomicLong());
    }
}
代码示例来源:origin: org.nd4j/nd4j-cuda-10.0
public DeviceTADManager() {
int numDevices = Nd4j.getAffinityManager().getNumberOfDevices();
for (int i = 0; i < numDevices; i++) {
tadCache.add(i, new ConcurrentHashMap<TadDescriptor, Pair<DataBuffer, DataBuffer>>());
}
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-parallel-wrapper
@Override
public Thread newThread(@NotNull Runnable r) {
    final int threadIdx = workerCounter.getAndIncrement();
    final Thread thread = Executors.defaultThreadFactory().newThread(r);
    thread.setName("ParallelWrapper training thread " + threadIdx);
    thread.setDaemon(true);
    thread.setUncaughtExceptionHandler(handler);
    // Distribute training threads across devices round-robin.
    Nd4j.getAffinityManager().attachThreadToDevice(thread,
            threadIdx % Nd4j.getAffinityManager().getNumberOfDevices());
    return thread;
}
});
代码示例来源:origin: org.nd4j/nd4j-cuda-7.5
public EventsProvider() {
int numDev = Nd4j.getAffinityManager().getNumberOfDevices();
for (int i = 0; i < numDev; i++) {
queue.add(new ConcurrentLinkedQueue<cudaEvent_t>());
}
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-core
/**
 * Builds a MagicQueue with the configured capacity, type and mode.
 * When no positive bucket count was set, it defaults to one bucket
 * per available device.
 */
public MagicQueue build() {
    if (numberOfBuckets < 1) {
        numberOfBuckets = Nd4j.getAffinityManager().getNumberOfDevices();
    }
    final MagicQueue result = new MagicQueue(numberOfBuckets, capacity, type);
    result.mode = this.mode;
    return result;
}
}
代码示例来源:origin: org.nd4j/nd4j-api
/**
 * Fetches the value stored for the calling thread's device.
 *
 * @return the device-local object, possibly null when absent
 */
@Nullable
public T get() {
    final int currentDevice = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    return get(currentDevice);
}
内容来源于网络,如有侵权,请联系作者删除!