本文整理了Java中org.sonar.duplications.block.Block.getStartLine()
方法的一些代码示例,展示了Block.getStartLine()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Block.getStartLine()
方法的具体详情如下:
包路径:org.sonar.duplications.block.Block
类名称:Block
方法名:getStartLine
暂无
代码示例来源:origin: SonarSource/sonarqube
/**
 * Builds a {@link CloneGroup} from the begin/end block pairs of the two groups and
 * forwards it to {@code filter}.
 *
 * <p>The origin part is the part belonging to {@code originResourceId} with the
 * smallest unit start; {@code null} if no part belongs to the origin resource.</p>
 */
private void reportClones(BlocksGroup beginGroup, BlocksGroup endGroup, int cloneLength) {
  List<ClonePart> parts = new ArrayList<>();
  ClonePart origin = null;
  for (Block[] pair : beginGroup.pairs(endGroup, cloneLength)) {
    Block first = pair[0];
    Block last = pair[1];
    ClonePart candidate = new ClonePart(first.getResourceId(),
      first.getIndexInFile(),
      first.getStartLine(),
      last.getEndLine());
    // Track the earliest part of the origin resource as the group's origin.
    boolean inOriginResource = originResourceId.equals(candidate.getResourceId());
    if (inOriginResource && (origin == null || candidate.getUnitStart() < origin.getUnitStart())) {
      origin = candidate;
    }
    parts.add(candidate);
  }
  filter.add(CloneGroup.builder().setLength(cloneLength).setOrigin(origin).setParts(parts).build());
}
代码示例来源:origin: SonarSource/sonarqube
/**
 * Given: 6 statements, block size is 3
 * Expected: 4 blocks with correct index and with line numbers
 *
 * (The javadoc previously claimed 5 statements, but six statements "1".."6" are
 * created below; with a block size of 3 that produces 6 - 3 + 1 = 4 blocks.)
 */
@Test
public void shouldBuildBlocksFromStatements() {
  List<Statement> statements = createStatementsFromStrings("1", "2", "3", "4", "5", "6");
  BlockChunker chunker = createChunkerWithBlockSize(3);
  List<Block> blocks = chunker.chunk("resource", statements);
  assertThat(blocks.size(), is(4));
  // First block covers statements 1..3 (lines 0..2).
  assertThat(blocks.get(0).getIndexInFile(), is(0));
  assertThat(blocks.get(0).getStartLine(), is(0));
  assertThat(blocks.get(0).getEndLine(), is(2));
  // Second block is shifted by one statement (lines 1..3).
  assertThat(blocks.get(1).getIndexInFile(), is(1));
  assertThat(blocks.get(1).getStartLine(), is(1));
  assertThat(blocks.get(1).getEndLine(), is(3));
}
代码示例来源:origin: SonarSource/sonarqube
/**
 * Indexes the CPD blocks of the given file in the in-memory index and, when
 * cross-project duplication is enabled, also serializes them into the scanner report.
 *
 * @throws UnsupportedOperationException if CPD tokens were already saved for this file
 */
public void insert(InputFile inputFile, Collection<Block> blocks) {
  if (settings.isCrossProjectDuplicationEnabled()) {
    int componentRef = ((DefaultInputFile) inputFile).scannerId();
    // Saving twice would silently overwrite report data, so fail fast instead.
    if (publisher.getWriter().hasComponentData(FileStructure.Domain.CPD_TEXT_BLOCKS, componentRef)) {
      throw new UnsupportedOperationException("Trying to save CPD tokens twice for the same file is not supported: " + inputFile.absolutePath());
    }
    // One builder instance is reused for every block; clear() resets it each time.
    final ScannerReport.CpdTextBlock.Builder builder = ScannerReport.CpdTextBlock.newBuilder();
    publisher.getWriter().writeCpdTextBlocks(componentRef, blocks.stream()
      .map(block -> builder.clear()
        .setStartLine(block.getStartLine())
        .setEndLine(block.getEndLine())
        .setStartTokenIndex(block.getStartUnit())
        .setEndTokenIndex(block.getEndUnit())
        .setHash(block.getBlockHash().toHexString())
        .build())
      .collect(Collectors.toList()));
  }
  for (Block block : blocks) {
    mem.insert(block);
  }
  if (blocks.isEmpty()) {
    LOG.debug("Not enough content in '{}' to have CPD blocks, it will not be part of the duplication detection", inputFile.relativePath());
  }
  indexedFiles.add(inputFile);
}
代码示例来源:origin: SonarSource/sonarqube
@Test
public void shouldBuildBlocks() {
  // Three token lines whose hash strings are the chars 1, 2 and 3.
  TokensLine first = new TokensLine(0, 9, 1, Character.toString((char) 1));
  TokensLine second = new TokensLine(10, 19, 2, Character.toString((char) 2));
  TokensLine third = new TokensLine(20, 29, 3, Character.toString((char) 3));

  List<Block> blocks = new PmdBlockChunker(2).chunk("resourceId", Arrays.asList(first, second, third));

  // Block size 2 over 3 lines -> 2 overlapping blocks.
  assertThat(blocks.size(), is(2));
  Block firstBlock = blocks.get(0);
  assertThat(firstBlock.getStartLine(), is(1));
  assertThat(firstBlock.getEndLine(), is(2));
  // Rolling hash: h = h * 31 + lineHash
  assertThat(firstBlock.getBlockHash(), is(new ByteArray(1L * 31 + 2)));
  Block secondBlock = blocks.get(1);
  assertThat(secondBlock.getStartLine(), is(2));
  assertThat(secondBlock.getEndLine(), is(3));
  assertThat(secondBlock.getBlockHash(), is(new ByteArray(2L * 31 + 3)));
}
代码示例来源:origin: SonarSource/sonarqube
/**
 * {@inheritDoc}
 * <p>
 * <strong>Note that this implementation allows insertion of two blocks with same index for one resource.</strong>
 * </p>
 */
@Override
public void insert(Block block) {
  sorted = false;
  ensureCapacity();
  resourceIds[size] = block.getResourceId();
  int[] hash = block.getBlockHash().toIntArray();
  if (hash.length != hashInts) {
    throw new IllegalArgumentException("Expected " + hashInts + " ints in hash, but got " + hash.length);
  }
  // Each record occupies blockInts ints: hash, then index/startLine/endLine/startUnit/endUnit.
  int base = size * blockInts;
  System.arraycopy(hash, 0, blockData, base, hashInts);
  blockData[base + hashInts] = block.getIndexInFile();
  blockData[base + hashInts + 1] = block.getStartLine();
  blockData[base + hashInts + 2] = block.getEndLine();
  blockData[base + hashInts + 3] = block.getStartUnit();
  blockData[base + hashInts + 4] = block.getEndUnit();
  size++;
}
代码示例来源:origin: SonarSource/sonarqube
firstBlock.getResourceId(),
firstBlock.getIndexInFile(),
firstBlock.getStartLine(),
lastBlock.getEndLine());
代码示例来源:origin: SonarSource/sonarqube
@Test
public void testBuilder() {
  ByteArray blockHash = new ByteArray(1);

  // Populate every builder field, then verify each getter round-trips it.
  Block built = Block.builder()
    .setResourceId("resource")
    .setBlockHash(blockHash)
    .setIndexInFile(1)
    .setLines(2, 3)
    .setUnit(4, 5)
    .build();

  assertThat(built.getResourceId(), is("resource"));
  // Hash must be kept by reference, not copied.
  assertThat(built.getBlockHash(), sameInstance(blockHash));
  assertThat(built.getIndexInFile(), is(1));
  assertThat(built.getStartLine(), is(2));
  assertThat(built.getEndLine(), is(3));
  assertThat(built.getStartUnit(), is(4));
  assertThat(built.getEndUnit(), is(5));
}
代码示例来源:origin: org.codehaus.sonar/sonar-batch
/**
 * Persists the CPD blocks of the given file as duplication units for the
 * current project snapshot.
 */
public void insert(InputFile inputFile, Collection<Block> blocks) {
  int resourceSnapshotId = getSnapshotIdFor(inputFile);
  // TODO Godin: maybe remove conversion of blocks to units?
  List<DuplicationUnitDto> units = Lists.newArrayList();
  for (Block b : blocks) {
    units.add(new DuplicationUnitDto(
      currentProjectSnapshotId,
      resourceSnapshotId,
      b.getBlockHash().toString(),
      b.getIndexInFile(),
      b.getStartLine(),
      b.getEndLine()));
  }
  dao.insert(units);
}
代码示例来源:origin: org.codehaus.sonar/sonar-duplications
/**
 * Builds a {@link CloneGroup} from the begin/end block pairs of the two groups and
 * forwards it to {@code filter}.
 *
 * <p>The origin part is the part belonging to {@code originResourceId} with the
 * smallest unit start; {@code null} if no part belongs to the origin resource.</p>
 */
private void reportClones(BlocksGroup beginGroup, BlocksGroup endGroup, int cloneLength) {
  List<Block[]> pairs = beginGroup.pairs(endGroup, cloneLength);
  ClonePart origin = null;
  List<ClonePart> parts = Lists.newArrayList();
  for (int i = 0; i < pairs.size(); i++) {
    Block[] pair = pairs.get(i);
    Block firstBlock = pair[0];
    Block lastBlock = pair[1];
    ClonePart part = new ClonePart(firstBlock.getResourceId(),
      firstBlock.getIndexInFile(),
      firstBlock.getStartLine(),
      lastBlock.getEndLine());
    if (originResourceId.equals(part.getResourceId())) {
      // Collapsed the nested if/else-if into one condition, matching the
      // equivalent implementation in the newer SonarSource version of this method.
      if (origin == null || part.getUnitStart() < origin.getUnitStart()) {
        origin = part;
      }
    }
    parts.add(part);
  }
  filter.add(CloneGroup.builder().setLength(cloneLength).setOrigin(origin).setParts(parts).build());
}
代码示例来源:origin: org.codehaus.sonar/sonar-duplications
/**
 * {@inheritDoc}
 * <p>
 * <strong>Note that this implementation allows insertion of two blocks with same index for one resource.</strong>
 * </p>
 */
@Override
public void insert(Block block) {
  sorted = false;
  ensureCapacity();
  resourceIds[size] = block.getResourceId();
  int[] hashParts = block.getBlockHash().toIntArray();
  if (hashParts.length != hashInts) {
    throw new IllegalArgumentException("Expected " + hashInts + " ints in hash, but got " + hashParts.length);
  }
  // Write the record at its slot: hash ints first, then the five metadata ints.
  int cursor = size * blockInts;
  for (int part : hashParts) {
    blockData[cursor++] = part;
  }
  blockData[cursor++] = block.getIndexInFile();
  blockData[cursor++] = block.getStartLine();
  blockData[cursor++] = block.getEndLine();
  blockData[cursor++] = block.getStartUnit();
  blockData[cursor] = block.getEndUnit();
  size++;
}
代码示例来源:origin: org.sonarsource.sonarqube/sonar-scanner-engine
/**
 * Indexes the CPD blocks of the given file in the in-memory index and, when
 * cross-project duplication is enabled, also serializes them into the scanner report.
 *
 * @throws UnsupportedOperationException if CPD tokens were already saved for this file
 */
public void insert(InputFile inputFile, Collection<Block> blocks) {
  if (settings.isCrossProjectDuplicationEnabled()) {
    int componentRef = ((DefaultInputFile) inputFile).scannerId();
    // Fail fast rather than silently overwrite previously written report data.
    if (publisher.getWriter().hasComponentData(FileStructure.Domain.CPD_TEXT_BLOCKS, componentRef)) {
      throw new UnsupportedOperationException("Trying to save CPD tokens twice for the same file is not supported: " + inputFile.absolutePath());
    }
    // A single builder is reused; clear() resets it before each block.
    final ScannerReport.CpdTextBlock.Builder builder = ScannerReport.CpdTextBlock.newBuilder();
    publisher.getWriter().writeCpdTextBlocks(componentRef, blocks.stream()
      .map(block -> builder.clear()
        .setStartLine(block.getStartLine())
        .setEndLine(block.getEndLine())
        .setStartTokenIndex(block.getStartUnit())
        .setEndTokenIndex(block.getEndUnit())
        .setHash(block.getBlockHash().toHexString())
        .build())
      .collect(Collectors.toList()));
  }
  for (Block block : blocks) {
    mem.insert(block);
  }
  if (blocks.isEmpty()) {
    LOG.debug("Not enough content in '{}' to have CPD blocks, it will not be part of the duplication detection", inputFile.relativePath());
  }
  indexedFiles.add(inputFile);
}
代码示例来源:origin: org.codehaus.sonar/sonar-duplications
firstBlock.getResourceId(),
firstBlock.getIndexInFile(),
firstBlock.getStartLine(),
lastBlock.getEndLine());
代码示例来源:origin: org.sonarsource.sonarqube/sonar-batch
@Override
public BatchReport.CpdTextBlock apply(Block input) {
// Converts one CPD Block into its report message; the builder is shared
// (declared in the enclosing scope, not visible here), so clear() resets it first.
builder.clear();
builder.setStartLine(input.getStartLine());
builder.setEndLine(input.getEndLine());
builder.setStartTokenIndex(input.getStartUnit());
builder.setEndTokenIndex(input.getEndUnit());
// Hash is serialized as its hexadecimal string form.
builder.setHash(input.getBlockHash().toHexString());
return builder.build();
}
}));
内容来源于网络,如有侵权,请联系作者删除!