public void clean() {
    // the lock protects removal from a concurrent put which could otherwise mutate the
    // queue after it has been removed from the map
    synchronized (unsent) {
        Iterator<ConcurrentLinkedQueue<ClientRequest>> iterator = unsent.values().iterator();
        while (iterator.hasNext()) {
            ConcurrentLinkedQueue<ClientRequest> requests = iterator.next();
            if (requests.isEmpty())
                iterator.remove();
        }
    }
}
protected void remove() {
    // drop this connection from the global connection list
    synchronized (connections) {
        connections.remove(this);
    }
    // unsubscribe this connection from every topic
    for (Set<Connection> conns : subscriptions.values()) {
        conns.remove(this);
    }
    // prune topics that no longer have any subscribers
    for (Iterator<Map.Entry<String, Set<Connection>>> it = subscriptions.entrySet().iterator(); it.hasNext(); ) {
        Map.Entry<String, Set<Connection>> entry = it.next();
        if (entry.getValue().isEmpty())
            it.remove();
    }
}
while (iterator.hasNext()) {
    String worker = iterator.next();
    ZkWorker zkWorker = zkWorkers.get(worker);
    try {
        log.info("Adding Worker[%s] to lazySet!", zkWorker.getWorker().getHost());
        lazyWorkers.put(worker, zkWorker);
        // stop once enough workers have been marked lazy
        if (lazyWorkers.size() == maxWorkers) {
            return getWorkerFromZK(lazyWorkers.values());
        }
    }
    catch (Exception e) {
        // assumed: propagate the failure; the original fragment ends before the catch block
        throw new RuntimeException(e);
    }
}
@Override
public void destroy() throws Exception {
    for (ReferenceBean referenceBean : referenceBeansCache.values()) {
        if (logger.isInfoEnabled()) {
            logger.info(referenceBean + " is being destroyed");
        }
        referenceBean.destroy();
    }
    injectionMetadataCache.clear();
    referenceBeansCache.clear();
    if (logger.isInfoEnabled()) {
        logger.info(getClass() + " was destroyed");
    }
}
private Collection<ClientRequest> removeExpiredRequests(long now) {
    List<ClientRequest> expiredRequests = new ArrayList<>();
    for (ConcurrentLinkedQueue<ClientRequest> requests : unsent.values()) {
        Iterator<ClientRequest> requestIterator = requests.iterator();
        while (requestIterator.hasNext()) {
            ClientRequest request = requestIterator.next();
            long elapsedMs = Math.max(0, now - request.createdTimeMs());
            if (elapsedMs > request.requestTimeoutMs()) {
                expiredRequests.add(request);
                requestIterator.remove();
            } else {
                // requests are queued in creation order, so the rest cannot have expired yet
                break;
            }
        }
    }
    return expiredRequests;
}
@Override
public V getOne(long timeout, TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
    boolean notElapsed = _latch.await(timeout, unit);
    if (!notElapsed) {
        throw new TimeoutException("Timed out waiting for async result for composite");
    }
    if (_delayedResponseMap.isEmpty()) {
        return null;
    }
    return _delayedResponseMap.values().iterator().next();
}
public List<Map<String, Object>> getURIStatDataList() {
    List<Map<String, Object>> uriStatDataList = new ArrayList<Map<String, Object>>(this.uriStatMap.size());
    for (WebURIStat uriStat : this.uriStatMap.values()) {
        Map<String, Object> uriStatData = uriStat.getStatData();

        int runningCount = ((Number) uriStatData.get("RunningCount")).intValue();
        long requestCount = (Long) uriStatData.get("RequestCount");

        if (runningCount == 0 && requestCount == 0) {
            continue;
        }

        uriStatDataList.add(uriStatData);
    }
    return uriStatDataList;
}
public Collection<ConsumerFilterData> getByGroup(final String consumerGroup) {
    Collection<ConsumerFilterData> ret = new HashSet<ConsumerFilterData>();

    Iterator<FilterDataMapByTopic> topicIterator = this.filterDataByTopic.values().iterator();
    while (topicIterator.hasNext()) {
        FilterDataMapByTopic filterDataMapByTopic = topicIterator.next();

        Iterator<ConsumerFilterData> filterDataIterator =
            filterDataMapByTopic.getGroupFilterData().values().iterator();
        while (filterDataIterator.hasNext()) {
            ConsumerFilterData filterData = filterDataIterator.next();
            if (filterData.getConsumerGroup().equals(consumerGroup)) {
                ret.add(filterData);
            }
        }
    }

    return ret;
}
try {
    Iterator<Map.Entry<String, JdbcSqlStat>> iter = sqlStatMap.entrySet().iterator();
    while (iter.hasNext()) {
        Map.Entry<String, JdbcSqlStat> entry = iter.next();
        JdbcSqlStat stat = entry.getValue();
        // only touch connection stats for SQL entries with no executions in flight or recorded
        if (stat.getExecuteCount() == 0 && stat.getRunningCount() == 0) {
            for (JdbcConnectionStat.Entry connectionStat : connections.values()) {
                connectionStat.reset();
            }
        }
    }
} catch (Exception e) {
    // assumed: propagate the failure; the original fragment ends before the catch block
    throw new RuntimeException(e);
}
/**
 * Test hook to return the per entry overhead for a bucket region. PRECONDITION: a bucket must
 * exist and be using LRU.
 *
 * @since GemFire 6.1.2.9
 */
public int getPerEntryLRUOverhead() {
    BucketRegion br = (localBucket2RegionMap.values().iterator().next());
    return br.getRegionMap().getEntryOverhead();
}
@Override
public void destroy() throws Exception {
    for (Object object : injectedObjectsCache.values()) {
        if (logger.isInfoEnabled()) {
            logger.info(object + " is being destroyed");
        }
        if (object instanceof DisposableBean) {
            ((DisposableBean) object).destroy();
        }
    }
    injectionMetadataCache.clear();
    injectedObjectsCache.clear();
    if (logger.isInfoEnabled()) {
        logger.info(getClass() + " was destroyed");
    }
}