@Override
public int compare(List<T> left, List<T> right)
{
    int compareLength = min(left.size(), right.size());
    for (int i = 0; i < compareLength; i++) {
        int result = elementComparator.compare(left.get(i), right.get(i));
        if (result != 0) {
            return result;
        }
    }
    return Integer.compare(left.size(), right.size());
}
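A minimal usage sketch, not from the original source: it inlines the compare logic above as a Comparator<List<Integer>> (with Integer::compare standing in for elementComparator) to show the lexicographic ordering and the shorter-list-first tie-break.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class LexicographicListDemo
{
    public static void main(String[] args)
    {
        // inlined copy of the compare logic above, specialized to Integer
        Comparator<List<Integer>> byElement = (left, right) -> {
            int compareLength = Math.min(left.size(), right.size());
            for (int i = 0; i < compareLength; i++) {
                int result = Integer.compare(left.get(i), right.get(i));
                if (result != 0) {
                    return result;
                }
            }
            // equal prefixes: the shorter list orders first
            return Integer.compare(left.size(), right.size());
        };

        List<List<Integer>> lists = new ArrayList<>(List.of(
                List.of(1, 3, 0), List.of(1, 2), List.of(1, 3)));
        lists.sort(byElement);
        System.out.println(lists); // [[1, 2], [1, 3], [1, 3, 0]]
    }
}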
@Override
public int processors( int delta )
{
    targetNumberOfProcessors = max( 1, min( targetNumberOfProcessors + delta, maxProcessors ) );
    return targetNumberOfProcessors;
}
/**
 * Ensures that an integer is within a given range.
 *
 * @param i The number to check.
 * @param min The minimum value to return.
 * @param max The maximum value to return.
 * @return {@code i} if the number is between {@code min} and {@code max},
 *         {@code min} if {@code i} is less than the minimum,
 *         {@code max} if {@code i} is greater than the maximum.
 */
private static int intRange(int i, int min, int max) {
    return Integer.min(Integer.max(min, i), max);
}
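A quick runnable sketch of the clamping behavior; the demo class is illustrative and restates the private helper so it can be run standalone.

class IntRangeDemo {
    // restated from the private helper above so the demo is self-contained
    private static int intRange(int i, int min, int max) {
        return Integer.min(Integer.max(min, i), max);
    }

    public static void main(String[] args) {
        System.out.println(intRange(5, 0, 10));  // 5  (already in range)
        System.out.println(intRange(-3, 0, 10)); // 0  (clamped up to min)
        System.out.println(intRange(42, 0, 10)); // 10 (clamped down to max)
    }
}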
@Override
public IdRange nextIdBatch( int size )
{
    int sizeLeft = size;
    long[] rangeDefrag = EMPTY_LONG_ARRAY;
    if ( position < defrag.length )
    {
        // There are defragged ids to grab
        int numberOfDefrags = min( sizeLeft, defrag.length - position );
        rangeDefrag = Arrays.copyOfRange( defrag, position, numberOfDefrags + position );
        position += numberOfDefrags;
        sizeLeft -= numberOfDefrags;
    }

    long rangeStart = 0;
    int rangeLength = 0;
    int rangeOffset = currentRangeOffset();
    int rangeAvailable = length - rangeOffset;
    if ( sizeLeft > 0 && rangeAvailable > 0 )
    {
        rangeStart = start + rangeOffset;
        rangeLength = min( rangeAvailable, sizeLeft );
        position += rangeLength;
    }
    return new IdRange( rangeDefrag, rangeStart, rangeLength );
}
private void writeDirectlyToOutputStream(byte[] bytes, int bytesOffset, int length) {
    if (compressor == null) {
        compressedOutputStream.writeBytes(bytes, bytesOffset, length);
        return;
    }
    // with a compressor in place, feed the stream one buffer-sized chunk at a time
    while (length > 0) {
        int chunkSize = Integer.min(length, buffer.length);
        writeChunkToOutputStream(bytes, bytesOffset, chunkSize);
        length -= chunkSize;
        bytesOffset += chunkSize;
    }
}
@Override
int compareValue( GenericKey left, GenericKey right )
{
    if ( left.isHighestArray || right.isHighestArray )
    {
        return Boolean.compare( left.isHighestArray, right.isHighestArray );
    }
    int index = 0;
    int compare = 0;
    int length = min( left.arrayLength, right.arrayLength );
    for ( ; compare == 0 && index < length; index++ )
    {
        compare = arrayElementComparator.compare( left, right, index );
    }
    return compare == 0 ? Integer.compare( left.arrayLength, right.arrayLength ) : compare;
}
private Number getUpperBound(double error, List<? extends Number> rows, double percentile)
{
    int medianIndex = (int) (rows.size() * percentile);
    int marginOfError = (int) (rows.size() * error / 2);
    return rows.get(min(medianIndex + marginOfError, rows.size() - 1));
}
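A worked sketch of the index arithmetic, not from the original source; it restates the method with java.lang.Math.min so it runs standalone.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class UpperBoundDemo
{
    // restated from the method above so the demo is self-contained
    private static Number getUpperBound(double error, List<? extends Number> rows, double percentile)
    {
        int medianIndex = (int) (rows.size() * percentile);
        int marginOfError = (int) (rows.size() * error / 2);
        return rows.get(Math.min(medianIndex + marginOfError, rows.size() - 1));
    }

    public static void main(String[] args)
    {
        List<Integer> rows = IntStream.range(0, 100).boxed().collect(Collectors.toList());
        // 100 sorted rows, percentile 0.5, error 0.1: index 50 + margin 5 -> row 55
        System.out.println(getUpperBound(0.1, rows, 0.5)); // 55
        // near the tail the min(..., size - 1) guard clamps 90 + 25 = 115 down to 99
        System.out.println(getUpperBound(0.5, rows, 0.9)); // 99
    }
}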
@Override
void minimalSplitter( GenericKey left, GenericKey right, GenericKey into )
{
    int lastEqualIndex = -1;
    if ( left.type == right.type )
    {
        int maxLength = min( left.arrayLength, right.arrayLength );
        for ( int index = 0; index < maxLength; index++ )
        {
            if ( arrayElementComparator.compare( left, right, index ) != 0 )
            {
                break;
            }
            lastEqualIndex++;
        }
    }
    // Convert from last equal index to first index to differ +1
    // Convert from index to length +1
    // Total +2
    int length = Math.min( right.arrayLength, lastEqualIndex + 2 );
    copyValue( into, right, length );
    into.arrayLength = length;
}
@Override
public int processors( int delta )
{
    if ( delta > 0 )
    {
        numberOfProcessors = min( numberOfProcessors + delta, maxProcessors );
    }
    else if ( delta < 0 )
    {
        numberOfProcessors = max( 1, numberOfProcessors + delta );
    }
    return numberOfProcessors;
}
/**
 * Creates a ring bit set whose size is large enough to explicitly
 * represent bits with indices in the range {@code 0} through
 * {@code bitSetSize-1}. The bits from the source ring bit set are copied
 * into the new ring bit set.
 *
 * @param bitSetSize the size of the ring bit set
 * @param sourceSet the source ring bit set
 * @throws NegativeArraySizeException if the specified initial size
 *         is negative
 */
RingBitSet(int bitSetSize, RingBitSet sourceSet) {
    this(bitSetSize);
    int targetLength = Integer.min(bitSetSize, sourceSet.length);
    int sourceIndex = sourceSet.index;
    int forwardIndex = sourceSet.size - sourceIndex;
    for (int i = 0; i < targetLength; i++) {
        this.setNextBit(sourceSet.bitSet.get(sourceIndex));
        // looping sourceIndex backwards without conditional statements
        forwardIndex = (forwardIndex + 1) % sourceSet.size;
        sourceIndex = (sourceSet.size - forwardIndex) % sourceSet.size;
    }
}
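A small sketch isolating the index trick in the copy loop above: the forwardIndex/sourceIndex pair is a branch-free decrement modulo size, equivalent to sourceIndex = (sourceIndex - 1 + size) % size. The standalone demo below is illustrative, not part of the source.

class BackwardIndexDemo {
    public static void main(String[] args) {
        int size = 5;
        int sourceIndex = 2; // corresponds to sourceSet.index above
        int forwardIndex = size - sourceIndex;
        for (int i = 0; i < 7; i++) {
            System.out.print(sourceIndex + " "); // prints: 2 1 0 4 3 2 1
            // same two steps as the copy loop: wraps around without any conditional
            forwardIndex = (forwardIndex + 1) % size;
            sourceIndex = (size - forwardIndex) % size;
        }
    }
}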
@Override
public synchronized IdRange nextIdBatch( int size )
{
    long[] defragIds = PrimitiveLongCollections.EMPTY_LONG_ARRAY;
    if ( freeList != null && !freeList.isEmpty() )
    {
        // serve as much of the batch as possible from previously freed ids
        defragIds = new long[min( size, freeList.size() )];
        for ( int i = 0; i < defragIds.length; i++ )
        {
            defragIds[i] = freeList.poll();
        }
        size -= defragIds.length;
    }
    return new IdRange( defragIds, nextId.getAndAdd( size ), size );
}
private void assignProcessorsToPotentialBottleNeck( StageExecution execution, int permits )
{
    Pair<Step<?>,Float> bottleNeck = execution.stepsOrderedBy( Keys.avg_processing_time, false ).iterator().next();
    Step<?> bottleNeckStep = bottleNeck.first();
    long doneBatches = batches( bottleNeckStep );
    if ( bottleNeck.other() > 1.0f &&
         batchesPassedSinceLastChange( bottleNeckStep, doneBatches ) >= config.movingAverageSize() )
    {
        // Assign at least 1/10th of the remaining permits, so that processors are
        // assigned more aggressively at the beginning of the run
        int optimalProcessorIncrement = min( max( 1, (int) bottleNeck.other().floatValue() - 1 ), permits );
        int before = bottleNeckStep.processors( 0 );
        int after = bottleNeckStep.processors( max( optimalProcessorIncrement, permits / 10 ) );
        if ( after > before )
        {
            lastChangedProcessors.put( bottleNeckStep, doneBatches );
        }
    }
}
@Override
public long add( Object id )
{
    String string = (String) id;
    byte[] bytes = UTF8.encode( string );
    int length = bytes.length;
    if ( length > 0xFFFF )
    {
        throw new IllegalArgumentException( string );
    }

    long startOffset = offset;
    // write the length as a little-endian two-byte header
    cache.setByte( offset++, 0, (byte) length );
    cache.setByte( offset++, 0, (byte) (length >>> Byte.SIZE) );
    current = cache.at( offset );
    for ( int i = 0; i < length; )
    {
        // copy as many bytes as fit into the current chunk, then move to the next one
        int bytesLeftToWrite = length - i;
        int bytesLeftInChunk = (int) (chunkSize - offset % chunkSize);
        int bytesToWriteInThisChunk = min( bytesLeftToWrite, bytesLeftInChunk );
        for ( int j = 0; j < bytesToWriteInThisChunk; j++ )
        {
            current.setByte( offset++, 0, bytes[i++] );
        }
        if ( length > i )
        {
            current = cache.at( offset );
        }
    }
    return startOffset;
}
@Override
public Object get( long offset )
{
    // read back the little-endian two-byte length header
    int length = cache.getByte( offset++, 0 ) & 0xFF;
    length |= (cache.getByte( offset++, 0 ) & 0xFF) << Byte.SIZE;
    ByteArray array = cache.at( offset );
    byte[] bytes = new byte[length];
    for ( int i = 0; i < length; )
    {
        // read as many bytes as remain in the current chunk, then move to the next one
        int bytesLeftToRead = length - i;
        int bytesLeftInChunk = (int) (chunkSize - offset % chunkSize);
        int bytesToReadInThisChunk = min( bytesLeftToRead, bytesLeftInChunk );
        for ( int j = 0; j < bytesToReadInThisChunk; j++ )
        {
            bytes[i++] = array.getByte( offset++, 0 );
        }
        if ( length > i )
        {
            array = cache.at( offset );
        }
    }
    return UTF8.decode( bytes );
}
int currentDotOnLine = currentProgressDot % dotsPerLine();
int progressDot = min( maxDot, dotOf( stashedProgress + progress ) );
int line = progressDot / dotsPerLine();
int dotOnLine = progressDot % dotsPerLine();
@Test
public void testScheduleSplitsBatched()
{
    StageExecutionPlan plan = createPlan(createFixedSplitSource(60, TestingSplit::createRemoteSplit));
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    SqlStageExecution stage = createSqlStageExecution(plan, nodeTaskMap);
    StageScheduler scheduler = getSourcePartitionedScheduler(plan, stage, nodeManager, nodeTaskMap, 7);

    for (int i = 0; i <= (60 / 7); i++) {
        ScheduleResult scheduleResult = scheduler.schedule();

        // finishes only when the last split is fetched
        if (i == (60 / 7)) {
            assertEffectivelyFinished(scheduleResult, scheduler);
        }
        else {
            assertFalse(scheduleResult.isFinished());
        }

        // never blocks
        assertTrue(scheduleResult.getBlocked().isDone());

        // the first batch creates all three tasks
        assertEquals(scheduleResult.getNewTasks().size(), i == 0 ? 3 : 0);
        assertEquals(stage.getAllTasks().size(), 3);

        assertPartitionedSplitCount(stage, min((i + 1) * 7, 60));
    }

    for (RemoteTask remoteTask : stage.getAllTasks()) {
        assertEquals(remoteTask.getPartitionedSplitCount(), 20);
    }

    stage.abort();
}
@Test
public void testScheduleSplitsOneAtATime()
{
    StageExecutionPlan plan = createPlan(createFixedSplitSource(60, TestingSplit::createRemoteSplit));
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    SqlStageExecution stage = createSqlStageExecution(plan, nodeTaskMap);
    StageScheduler scheduler = getSourcePartitionedScheduler(plan, stage, nodeManager, nodeTaskMap, 1);

    for (int i = 0; i < 60; i++) {
        ScheduleResult scheduleResult = scheduler.schedule();

        // only finishes when the last split is fetched
        if (i == 59) {
            assertEffectivelyFinished(scheduleResult, scheduler);
        }
        else {
            assertFalse(scheduleResult.isFinished());
        }

        // never blocks
        assertTrue(scheduleResult.getBlocked().isDone());

        // first three splits create new tasks
        assertEquals(scheduleResult.getNewTasks().size(), i < 3 ? 1 : 0);
        assertEquals(stage.getAllTasks().size(), i < 3 ? i + 1 : 3);

        assertPartitionedSplitCount(stage, min(i + 1, 60));
    }

    for (RemoteTask remoteTask : stage.getAllTasks()) {
        assertEquals(remoteTask.getPartitionedSplitCount(), 20);
    }

    stage.abort();
}
private static String[] randomLabels( RandomValues random, Distribution<String> labels )
{
    if ( labels.length() == 0 )
    {
        return NO_LABELS;
    }

    // pick between 1 and 3 distinct labels, bounded by how many exist
    int length = random.nextInt( min( 3, labels.length() ) ) + 1;
    String[] result = new String[length];
    for ( int i = 0; i < result.length; )
    {
        String candidate = labels.random( random );
        if ( !ArrayUtil.contains( result, i, candidate ) )
        {
            result[i++] = candidate;
        }
    }
    return result;
}