/*
 * The contents of this file are subject to the terms of the Common Development and
 * Distribution License (the License). You may not use this file except in compliance with the
 * License.
 *
 * You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the
 * specific language governing permission and limitations under the License.
 *
 * When distributing Covered Software, include this CDDL Header Notice in each file and include
 * the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL
 * Header, with the fields enclosed by brackets [] replaced by your own identifying
 * information: "Portions Copyright [year] [name of copyright owner]".
 *
 * Copyright 2006-2008 Sun Microsystems, Inc.
 * Portions Copyright 2011-2016 ForgeRock AS.
 */
package org.opends.server.extensions;

import static org.opends.messages.ExtensionMessages.*;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.forgerock.i18n.LocalizableMessage;
import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.config.server.ConfigChangeResult;
import org.forgerock.opendj.config.server.ConfigException;
import org.forgerock.opendj.config.server.ConfigurationChangeListener;
import org.forgerock.opendj.ldap.DN;
import org.forgerock.opendj.server.config.server.EntryCacheCfg;
import org.forgerock.opendj.server.config.server.FIFOEntryCacheCfg;
import org.forgerock.util.Utils;
import org.opends.server.api.EntryCache;
import org.opends.server.api.MonitorData;
import org.opends.server.core.DirectoryServer;
import org.opends.server.core.ServerContext;
import org.opends.server.types.CacheEntry;
import org.opends.server.types.Entry;
import org.opends.server.types.InitializationException;
import org.opends.server.types.SearchFilter;
import org.opends.server.util.ServerConstants;

/**
 * This class defines a Directory Server entry cache that uses a FIFO to keep
 * track of the entries. Entries that have been in the cache the longest are
 * the most likely candidates for purging if space is needed. In contrast to
 * other cache structures, the selection of entries to purge is not based on
 * how frequently or recently the entries have been accessed. This requires
 * significantly less locking (it will only be required when an entry is added
 * or removed from the cache, rather than each time an entry is accessed).
 * <BR><BR>
 * Cache sizing is based on the percentage of free memory within the JVM, such
 * that if enough memory is free, then adding an entry to the cache will not
 * require purging, but if more than a specified percentage of the available
 * memory within the JVM is already consumed, then one or more entries will need
 * to be removed in order to make room for a new entry. It is also possible to
 * configure a maximum number of entries for the cache. If this is specified,
 * then the number of entries will not be allowed to exceed this value, but it
 * may not be possible to hold this many entries if the available memory fills
 * up first.
 * <BR><BR>
 * Other configurable parameters for this cache include the maximum length of
 * time to block while waiting to acquire a lock, and a set of filters that may
 * be used to define criteria for determining which entries are stored in the
 * cache. If a filter list is provided, then only entries matching at least one
 * of the given filters will be stored in the cache.
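 * <BR><BR>
 * A typical interaction with this cache is sketched below for illustration
 * only; the backend ID and entry ID values are arbitrary examples, and in
 * practice the server's entry cache framework creates and initializes the
 * cache from its configuration:
 * <PRE>
 *   FIFOEntryCache cache = new FIFOEntryCache();
 *   cache.initializeEntryCache(serverContext, configuration);
 *   cache.putEntry(entry, "userRoot", 42L);
 *   Entry cached = cache.getEntry(entry.getName());
 *   cache.removeEntry(entry.getName());
 * </PRE>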
*/
public class FIFOEntryCache
       extends EntryCache<FIFOEntryCacheCfg>
       implements ConfigurationChangeListener<FIFOEntryCacheCfg>
{
  private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();

  /** The reference to the Java runtime used to determine the amount of memory currently in use. */
  private static final Runtime runtime = Runtime.getRuntime();

  /** The mapping between entry backends/IDs and entries. */
  private Map<String, Map<Long, CacheEntry>> idMap;

  /** The mapping between DNs and entries. */
  private LinkedHashMap<DN,CacheEntry> dnMap;

  /** The lock used to provide threadsafe access when changing the contents of the cache. */
  private ReadWriteLock cacheLock;
  private Lock cacheWriteLock;
  private Lock cacheReadLock;

  /**
   * The maximum amount of memory in bytes that the JVM will be allowed to use
   * before we need to start purging entries.
   */
  private long maxAllowedMemory;

  /** The maximum number of entries that may be held in the cache. */
  private long maxEntries;

  /** Currently registered configuration object. */
  private FIFOEntryCacheCfg registeredConfiguration;

  /** The maximum length of time, in milliseconds, to try to obtain a lock before giving up. */
  private long lockTimeout = 2000;

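  /** The server context for this Directory Server instance, set when the cache is initialized. */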
private ServerContext serverContext;

  /** Creates a new instance of this FIFO entry cache. */
  public FIFOEntryCache()
  {
    super();
    // All initialization should be performed in initializeEntryCache().
  }

  @Override
  public void initializeEntryCache(ServerContext serverContext, FIFOEntryCacheCfg configuration)
      throws ConfigException, InitializationException
  {
    this.serverContext = serverContext;
    registeredConfiguration = configuration;
    configuration.addFIFOChangeListener (this);

    // Initialize the cache structures.
    idMap = new HashMap<>();
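    // The default LinkedHashMap iterates in insertion order, which is what
    // provides the FIFO eviction order for this cache.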
dnMap = new LinkedHashMap<>();

    // Initialize locks.
    cacheLock = new ReentrantReadWriteLock(true);
    cacheWriteLock = cacheLock.writeLock();
    cacheReadLock = cacheLock.readLock();

    // Read configuration and apply changes.
    boolean applyChanges = true;
    List<LocalizableMessage> errorMessages = new ArrayList<>();
    EntryCacheCommon.ConfigErrorHandler errorHandler =
        EntryCacheCommon.getConfigErrorHandler (
        EntryCacheCommon.ConfigPhase.PHASE_INIT, null, errorMessages
        );
    if (!processEntryCacheConfig(configuration, applyChanges, errorHandler)) {
      String buffer = Utils.joinAsString(". ", errorMessages);
      throw new ConfigException(ERR_FIFOCACHE_CANNOT_INITIALIZE.get(buffer));
    }
  }

  @Override
  public void finalizeEntryCache()
  {
    cacheWriteLock.lock();

    try {
      registeredConfiguration.removeFIFOChangeListener(this);

      // Release all memory currently in use by this cache.
      try {
        idMap.clear();
        dnMap.clear();
      } catch (Exception e) {
        // This should never happen.
        logger.traceException(e);
      }
    } finally {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public boolean containsEntry(DN entryDN)
  {
    if (entryDN == null) {
      return false;
    }

    // Indicate whether the DN map contains the specified DN.
    cacheReadLock.lock();
    try {
      return dnMap.containsKey(entryDN);
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public Entry getEntry(DN entryDN)
  {
    // Simply return the entry from the DN map.
    cacheReadLock.lock();
    try {
      CacheEntry e = dnMap.get(entryDN);
      if (e == null) {
        // Indicate cache miss.
        cacheMisses.getAndIncrement();
        return null;
      }
      // Indicate cache hit.
      cacheHits.getAndIncrement();
      return e.getEntry();
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public long getEntryID(DN entryDN)
  {
    // Simply return the ID from the DN map.
    cacheReadLock.lock();
    try {
      CacheEntry e = dnMap.get(entryDN);
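      // A value of -1 indicates that the entry is not present in the cache.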
return e != null ? e.getEntryID() : -1;
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public DN getEntryDN(String backendID, long entryID)
  {
    // Locate specific backend map and return the entry DN by ID.
    cacheReadLock.lock();
    try {
      Map<Long, CacheEntry> backendMap = idMap.get(backendID);
      if (backendMap != null) {
        CacheEntry e = backendMap.get(entryID);
        if (e != null) {
          return e.getDN();
        }
      }
      return null;
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public void putEntry(Entry entry, String backendID, long entryID)
  {
    // Create the cache entry based on the provided information.
    CacheEntry cacheEntry = new CacheEntry(entry, backendID, entryID);

    // Obtain a lock on the cache. If this fails, then don't do anything.
    try
    {
      if (!cacheWriteLock.tryLock(lockTimeout, TimeUnit.MILLISECONDS))
      {
        return;
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      return;
    }

    // At this point, we hold the lock. No matter what, we must release the
    // lock before leaving this method, so do that in a finally block.
    try
    {
      // See if the current memory usage is within acceptable constraints. If
      // so, then add the entry to the cache (or replace it if it is already
      // present). If not, then remove an existing entry and don't add the new
      // entry.
      long usedMemory = runtime.totalMemory() - runtime.freeMemory();
      if (usedMemory > maxAllowedMemory)
      {
        CacheEntry cachedEntry = dnMap.remove(entry.getName());
        if (cachedEntry == null)
        {
          // The current entry wasn't there, let's remove an existing entry.
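          // dnMap iterates in insertion order, so the first value is the oldest entry.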
Iterator<CacheEntry> iterator = dnMap.values().iterator();
          if (iterator.hasNext())
          {
            CacheEntry ce = iterator.next();
            iterator.remove();

            Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
            if (m != null)
            {
              m.remove(ce.getEntryID());
            }
          }
        }
        else
        {
          // Try to remove the entry from the ID list as well.
          Map<Long,CacheEntry> map = idMap.get(backendID);
          if (map != null)
          {
            map.remove(cacheEntry.getEntryID());
            // If this backend is now empty, remove it from the idMap.
            if (map.isEmpty())
            {
              idMap.remove(backendID);
            }
          }
        }
      }
      else
      {
        // Add the entry to the cache. This will replace it if it is already
        // present and add it if it isn't.
        dnMap.put(entry.getName(), cacheEntry);

        Map<Long,CacheEntry> map = idMap.get(backendID);
        if (map == null)
        {
          map = new HashMap<>();
          map.put(entryID, cacheEntry);
          idMap.put(backendID, map);
        }
        else
        {
          map.put(entryID, cacheEntry);
        }

        // See if a cap has been placed on the maximum number of entries in the
        // cache. If so, then see if we have exceeded it and we need to purge
        // entries until we're within the limit.
        int entryCount = dnMap.size();
        if (maxEntries > 0 && entryCount > maxEntries)
        {
          Iterator<CacheEntry> iterator = dnMap.values().iterator();
          while (iterator.hasNext() && entryCount > maxEntries)
          {
            CacheEntry ce = iterator.next();
            iterator.remove();

            Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
            if (m != null)
            {
              m.remove(ce.getEntryID());
            }

            entryCount--;
          }
        }
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public boolean putEntryIfAbsent(Entry entry, String backendID, long entryID)
  {
    // Create the cache entry based on the provided information.
    CacheEntry cacheEntry = new CacheEntry(entry, backendID, entryID);

    // Obtain a lock on the cache. If this fails, then don't do anything.
    try
    {
      if (!cacheWriteLock.tryLock(lockTimeout, TimeUnit.MILLISECONDS))
      {
        // We can't rule out the possibility of a conflict, so return false.
        return false;
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // We can't rule out the possibility of a conflict, so return false.
      return false;
    }

    // At this point, we hold the lock. No matter what, we must release the
    // lock before leaving this method, so do that in a finally block.
    try
    {
      // See if the entry already exists in the cache. If it does, then we will
      // fail and not actually store the entry.
      if (dnMap.containsKey(entry.getName()))
      {
        return false;
      }

      // See if the current memory usage is within acceptable constraints. If
      // so, then add the entry to the cache (or replace it if it is already
      // present). If not, then remove an existing entry and don't add the new
      // entry.
      long usedMemory = runtime.totalMemory() - runtime.freeMemory();
      if (usedMemory > maxAllowedMemory)
      {
        Iterator<CacheEntry> iterator = dnMap.values().iterator();
        if (iterator.hasNext())
        {
          CacheEntry ce = iterator.next();
          iterator.remove();

          Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
          if (m != null)
          {
            m.remove(ce.getEntryID());
          }
        }
      }
      else
      {
        // Add the entry to the cache. This will replace it if it is already
        // present and add it if it isn't.
        dnMap.put(entry.getName(), cacheEntry);

        Map<Long,CacheEntry> map = idMap.get(backendID);
        if (map == null)
        {
          map = new HashMap<>();
          map.put(entryID, cacheEntry);
          idMap.put(backendID, map);
        }
        else
        {
          map.put(entryID, cacheEntry);
        }

        // See if a cap has been placed on the maximum number of entries in the
        // cache. If so, then see if we have exceeded it and we need to purge
        // entries until we're within the limit.
        int entryCount = dnMap.size();
        if (maxEntries > 0 && entryCount > maxEntries)
        {
          Iterator<CacheEntry> iterator = dnMap.values().iterator();
          while (iterator.hasNext() && entryCount > maxEntries)
          {
            CacheEntry ce = iterator.next();
            iterator.remove();

            Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
            if (m != null)
            {
              m.remove(ce.getEntryID());
            }

            entryCount--;
          }
        }
      }

      // We'll always return true in this case, even if we didn't actually add
      // the entry due to memory constraints.
      return true;
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // We can't be sure there wasn't a conflict, so return false.
      return false;
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void removeEntry(DN entryDN)
  {
    // Acquire the lock on the cache. We should not return until the entry is
    // removed, so we will block until we can obtain the lock.
    // FIXME -- An alternate approach could be to block for a maximum length of
    // time and then, if that fails, put it in a queue for processing by some
    // other thread before it releases the lock.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // Check the DN cache to see if the entry exists. If not, then don't do
      // anything.
      CacheEntry entry = dnMap.remove(entryDN);
      if (entry == null)
      {
        return;
      }

      final String backendID = entry.getBackendID();

      // Try to remove the entry from the ID list as well.
      Map<Long,CacheEntry> map = idMap.get(backendID);
      if (map == null)
      {
        // This shouldn't happen, but the entry isn't cached in the ID map so
        // we can return.
        return;
      }

      map.remove(entry.getEntryID());

      // If this backend is now empty, remove it from the idMap.
      if (map.isEmpty())
      {
        idMap.remove(backendID);
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void clear()
  {
    // Acquire a lock on the cache. We should not return until the cache has
    // been cleared, so we will block until we can obtain the lock.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // Clear the DN cache.
      dnMap.clear();

      // Clear the ID cache.
      idMap.clear();
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void clearBackend(String backendID)
  {
    // Acquire a lock on the cache. We should not return until the cache has
    // been cleared, so we will block until we can obtain the lock.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // Remove all references to entries for this backend from the ID cache.
      Map<Long,CacheEntry> map = idMap.remove(backendID);
      if (map == null)
      {
        // No entries were in the cache for this backend, so we can return
        // without doing anything.
        return;
      }

      // Unfortunately, there is no good way to dump the entries from the DN
      // cache based on their backend, so we will need to iterate through the
      // entries in the ID map and do it manually. Since this could take a
      // while, we'll periodically release and re-acquire the lock in case
      // anyone else is waiting on it so this doesn't become a stop-the-world
      // event as far as the cache is concerned.
      int entriesDeleted = 0;
      for (CacheEntry e : map.values())
      {
        dnMap.remove(e.getEntry().getName());
        entriesDeleted++;

        if ((entriesDeleted % 1000) == 0)
        {
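          // While the lock is briefly released, other threads may acquire it
          // and observe a partially cleared backend.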
cacheWriteLock.unlock();
          Thread.yield();
          cacheWriteLock.lock();
        }
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void handleLowMemory()
  {
    // Grab the lock on the cache and wait until we have it.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // See how many entries are in the cache. If there are fewer than 1000,
      // then we'll dump all of them. Otherwise, we'll dump 10% of the entries.
      int numEntries = dnMap.size();
      if (numEntries < 1000)
      {
        dnMap.clear();
        idMap.clear();
      }
      else
      {
        int numToDrop = numEntries / 10;
        Iterator<CacheEntry> iterator = dnMap.values().iterator();
        while (iterator.hasNext() && numToDrop > 0)
        {
          CacheEntry entry = iterator.next();
          iterator.remove();

          Map<Long,CacheEntry> m = idMap.get(entry.getBackendID());
          if (m != null)
          {
            m.remove(entry.getEntryID());
          }

          numToDrop--;
        }
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public boolean isConfigurationAcceptable(EntryCacheCfg configuration,
                                           List<LocalizableMessage> unacceptableReasons)
  {
    FIFOEntryCacheCfg config = (FIFOEntryCacheCfg) configuration;
    return isConfigurationChangeAcceptable(config, unacceptableReasons);
  }

  @Override
  public boolean isConfigurationChangeAcceptable(
      FIFOEntryCacheCfg configuration,
      List<LocalizableMessage> unacceptableReasons
      )
  {
    boolean applyChanges = false;
    EntryCacheCommon.ConfigErrorHandler errorHandler =
        EntryCacheCommon.getConfigErrorHandler (
        EntryCacheCommon.ConfigPhase.PHASE_ACCEPTABLE,
        unacceptableReasons,
        null
        );
    processEntryCacheConfig (configuration, applyChanges, errorHandler);

    return errorHandler.getIsAcceptable();
  }

  @Override
  public ConfigChangeResult applyConfigurationChange( FIFOEntryCacheCfg configuration )
  {
    boolean applyChanges = true;
    List<LocalizableMessage> errorMessages = new ArrayList<>();
    EntryCacheCommon.ConfigErrorHandler errorHandler =
        EntryCacheCommon.getConfigErrorHandler (
        EntryCacheCommon.ConfigPhase.PHASE_APPLY, null, errorMessages
        );

    // Do not apply changes unless this cache is enabled.
    if (configuration.isEnabled()) {
      processEntryCacheConfig (configuration, applyChanges, errorHandler);
    }

    final ConfigChangeResult changeResult = new ConfigChangeResult();
    changeResult.setResultCode(errorHandler.getResultCode());
    changeResult.setAdminActionRequired(errorHandler.getIsAdminActionRequired());
    changeResult.getMessages().addAll(errorHandler.getErrorMessages());
    return changeResult;
  }

  /**
   * Parses the provided configuration and configures the entry cache.
   *
   * @param configuration The new configuration containing the changes.
   * @param applyChanges If true, then apply the new configuration.
   * @param errorHandler A handler used to report errors.
   *
   * @return <CODE>true</CODE> if the configuration is acceptable,
   *         or <CODE>false</CODE> otherwise.
   */
  private boolean processEntryCacheConfig(
      FIFOEntryCacheCfg configuration,
      boolean applyChanges,
      EntryCacheCommon.ConfigErrorHandler errorHandler
      )
  {
    // Local variables to read configuration.
    Set<SearchFilter> newIncludeFilters = null;
    Set<SearchFilter> newExcludeFilters = null;

    // Read configuration.
    DN newConfigEntryDN = configuration.dn();
    long newLockTimeout = configuration.getLockTimeout();
    long newMaxEntries = configuration.getMaxEntries();

    // Maximum memory the cache can use.
    int newMaxMemoryPercent = configuration.getMaxMemoryPercent();
    long maxJvmHeapSize = Runtime.getRuntime().maxMemory();
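    // Integer division: e.g., with a 1 GiB heap and max-memory-percent of 90,
    // this works out to (1073741824 / 100) * 90 = 966367620 bytes.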
long newMaxAllowedMemory = (maxJvmHeapSize / 100) * newMaxMemoryPercent;

    // Get include and exclude filters.
    switch (errorHandler.getConfigPhase())
    {
    case PHASE_INIT:
    case PHASE_ACCEPTABLE:
    case PHASE_APPLY:
      newIncludeFilters = EntryCacheCommon.getFilters (
          configuration.getIncludeFilter(),
          ERR_CACHE_INVALID_INCLUDE_FILTER,
          errorHandler,
          newConfigEntryDN
          );
      newExcludeFilters = EntryCacheCommon.getFilters (
          configuration.getExcludeFilter(),
          ERR_CACHE_INVALID_EXCLUDE_FILTER,
          errorHandler,
          newConfigEntryDN
          );
      break;
    }

    if (applyChanges && errorHandler.getIsAcceptable())
    {
      maxEntries = newMaxEntries;
      maxAllowedMemory = newMaxAllowedMemory;
      lockTimeout = newLockTimeout;
      setIncludeFilters(newIncludeFilters);
      setExcludeFilters(newExcludeFilters);
      registeredConfiguration = configuration;
    }

    return errorHandler.getIsAcceptable();
  }

  @Override
  public MonitorData getMonitorData()
  {
    try {
      return EntryCacheCommon.getGenericMonitorData(
          cacheHits.longValue(),
          // If the cache miss count is maintained by the default cache,
          // get it from there; if not, this points back to this cache itself.
          DirectoryServer.getEntryCache().getCacheMisses(),
          null,
          maxAllowedMemory,
          Long.valueOf(dnMap.size()),
          Long.valueOf(
              (maxEntries != Integer.MAX_VALUE && maxEntries != Long.MAX_VALUE) ? maxEntries : 0)
          );
    } catch (Exception e) {
      logger.traceException(e);
      return new MonitorData(0);
    }
  }

  @Override
  public Long getCacheCount()
  {
    return Long.valueOf(dnMap.size());
  }

  @Override
  public String toVerboseString()
  {
    StringBuilder sb = new StringBuilder();

    Map<DN,CacheEntry> dnMapCopy;
    Map<String, Map<Long, CacheEntry>> idMapCopy;

    // Grab cache lock to prevent any modifications
    // to the cache maps until a snapshot is taken.
    cacheWriteLock.lock();
    try {
      // Examining the live maps would hold the lock and, for any access-ordered
      // maps, could itself modify them, so take copies instead.
      dnMapCopy = new LinkedHashMap<>(dnMap);
      idMapCopy = new HashMap<>(idMap);
    } finally {
      cacheWriteLock.unlock();
    }

    // Check dnMap first.
    for (Map.Entry<DN, CacheEntry> mapEntry : dnMapCopy.entrySet()) {
      DN dn = mapEntry.getKey();
      final CacheEntry cacheEntry = mapEntry.getValue();
      sb.append(dn);
      sb.append(":");
      sb.append(cacheEntry != null ? Long.toString(cacheEntry.getEntryID()) : null);
      sb.append(":");
      sb.append(cacheEntry != null ? cacheEntry.getBackendID() : null);
      sb.append(ServerConstants.EOL);
    }

    // See if there is anything in idMap that is not reflected in
    // dnMap, in case the maps went out of sync.
    for (Map.Entry<String, Map<Long, CacheEntry>> backendCache : idMapCopy.entrySet()) {
      final String backendID = backendCache.getKey();
      for (Map.Entry<Long, CacheEntry> entry : backendCache.getValue().entrySet()) {
        final CacheEntry cacheEntry = entry.getValue();
        if (cacheEntry == null || !dnMapCopy.containsKey(cacheEntry.getDN())) {
          sb.append(cacheEntry != null ? cacheEntry.getDN() : null);
          sb.append(":");
          sb.append(entry.getKey());
          sb.append(":");
          sb.append(backendID);
          sb.append(ServerConstants.EOL);
        }
      }
    }

    String verboseString = sb.toString();
    return verboseString.length() > 0 ? verboseString : null;
  }
}