From e1fd1b1ce8793cb81d0cf70736872af324227bbf Mon Sep 17 00:00:00 2001
From: abobrov <abobrov@localhost>
Date: Tue, 05 Jun 2007 15:14:44 +0000
Subject: [PATCH] [Issue 1117] Create an entry cache backed by DB+tmpfs
---
opendj-sdk/opends/src/server/org/opends/server/core/DirectoryServer.java | 53
opendj-sdk/opends/src/server/org/opends/server/messages/JebMessages.java | 13
opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/FileSystemEntryCacheConfiguration.xml | 274 ++++
opendj-sdk/opends/src/server/org/opends/server/messages/ExtensionsMessages.java | 280 +++++
opendj-sdk/opends/src/server/org/opends/server/config/ConfigConstants.java | 112 ++
opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCache.java | 2363 ++++++++++++++++++++++++++++++++++++++++++
opendj-sdk/opends/src/server/org/opends/server/backends/jeb/BackendImpl.java | 73 +
opendj-sdk/opends/resource/schema/02-config.ldif | 18
opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCacheIndex.java | 73 +
9 files changed, 3250 insertions(+), 9 deletions(-)
diff --git a/opendj-sdk/opends/resource/schema/02-config.ldif b/opendj-sdk/opends/resource/schema/02-config.ldif
index 0907d67..4cb3c58 100644
--- a/opendj-sdk/opends/resource/schema/02-config.ldif
+++ b/opendj-sdk/opends/resource/schema/02-config.ldif
@@ -1245,6 +1245,18 @@
NAME 'ds-cfg-writer-append'
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE
X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.362
+ NAME 'ds-cfg-max-memory-size' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.363
+ NAME 'ds-cfg-cache-type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.364
+ NAME 'ds-cfg-cache-directory' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.365
+ NAME 'ds-cfg-persistent-cache' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
+ SINGLE-VALUE X-ORIGIN 'OpenDS Directory Server' )
attributeTypes: ( 1.3.6.1.4.1.26027.1.1.375
NAME 'ds-cfg-allow-retrieving-membership' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE X-ORIGIN 'OpenDS Directory Server' )
@@ -1807,4 +1819,10 @@
NAME 'ds-cfg-member-virtual-attribute' SUP ds-cfg-virtual-attribute
STRUCTURAL MUST ds-cfg-allow-retrieving-membership
X-ORIGIN 'OpenDS Directory Server' )
+objectClasses: ( 1.3.6.1.4.1.26027.1.2.110 NAME 'ds-cfg-file-system-entry-cache'
+ SUP ds-cfg-entry-cache STRUCTURAL MAY ( ds-cfg-max-entries $
+ ds-cfg-max-memory-size $ ds-cfg-lock-timeout $ ds-cfg-exclude-filter $
+ ds-cfg-include-filter $ ds-cfg-cache-directory $ ds-cfg-cache-type $
+ ds-cfg-persistent-cache $ ds-cfg-database-cache-percent $
+ ds-cfg-database-cache-size ) X-ORIGIN 'OpenDS Directory Server' )
diff --git a/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/FileSystemEntryCacheConfiguration.xml b/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/FileSystemEntryCacheConfiguration.xml
new file mode 100644
index 0000000..25a628f
--- /dev/null
+++ b/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/FileSystemEntryCacheConfiguration.xml
@@ -0,0 +1,274 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ ! CDDL HEADER START
+ !
+ ! The contents of this file are subject to the terms of the
+ ! Common Development and Distribution License, Version 1.0 only
+ ! (the "License"). You may not use this file except in compliance
+ ! with the License.
+ !
+ ! You can obtain a copy of the license at
+ ! trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ ! or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ ! See the License for the specific language governing permissions
+ ! and limitations under the License.
+ !
+ ! When distributing Covered Code, include this CDDL HEADER in each
+ ! file and include the License file at
+ ! trunk/opends/resource/legal-notices/OpenDS.LICENSE. If applicable,
+ ! add the following below this CDDL HEADER, with the fields enclosed
+ ! by brackets "[]" replaced with your own identifying information:
+ ! Portions Copyright [yyyy] [name of copyright owner]
+ !
+ ! CDDL HEADER END
+ !
+ !
+ ! Portions Copyright 2007 Sun Microsystems, Inc.
+ ! -->
+
+<adm:managed-object
+ name="file-system-entry-cache"
+ plural-name="file-system-entry-caches"
+ package="org.opends.server.admin.std"
+ extends="entry-cache"
+ xmlns:adm="http://www.opends.org/admin"
+ xmlns:ldap="http://www.opends.org/admin-ldap"
+ >
+
+ <adm:synopsis>
+ The <adm:user-friendly-name/> is an entry cache implementation which uses
+ a JE database to keep track of the entries.
+ </adm:synopsis>
+
+ <adm:description>
+    For the best performance, the JE database should reside in a
+    memory-based file system, although any file system will suffice for
+    this cache to function. Entries are maintained in either a FIFO
+    (default) or an LRU (configurable) ordered list.
+
+    Cache sizing is based on the size or percentage of free space available
+    in the file system: if enough space is free, adding an entry to the
+    cache requires no purging, but once more than a specified percentage of
+    the available file system space has been consumed, one or more entries
+    must be removed to make room for a new entry. It is also possible to
+    configure a maximum number of entries for the cache. If this is
+    specified, the number of entries is not allowed to exceed this value,
+    but it may not be possible to hold this many entries if the available
+    memory fills up first.
+
+    Other configurable parameters for this cache include the maximum length
+    of time to block while waiting to acquire a lock, and a set of filters
+    that may be used to define criteria for determining which entries are
+    stored in the cache. If a set of filters is provided, then an entry must
+    match at least one of them in order to be stored in the cache.
+
+    The JE environment cache size can also be configured, either as a
+    percentage of the free memory available in the JVM or as an absolute
+    size in bytes.
+
+ This cache has a persistence property which, if enabled, allows for the
+ contents of the cache to persist across server or cache restarts.
+ </adm:description>
+
+ <adm:profile name="ldap">
+ <ldap:object-class>
+ <ldap:oid>1.3.6.1.4.1.26027.1.2.110</ldap:oid>
+ <ldap:name>ds-cfg-file-system-entry-cache</ldap:name>
+ <ldap:superior>ds-cfg-entry-cache</ldap:superior>
+ </ldap:object-class>
+ </adm:profile>
+
+ <adm:property name="lock-timeout">
+ <adm:synopsis>
+ The length of time in milliseconds to wait while
+ attempting to acquire a read or write lock.
+ </adm:synopsis>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>2000.0ms</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:duration base-unit="ms" allow-unlimited="true"/>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.58</ldap:oid>
+ <ldap:name>ds-cfg-lock-timeout</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property name="max-memory-size">
+ <adm:synopsis>
+ The maximum size of the entry cache in bytes.
+ </adm:synopsis>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>0b</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:size/>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.362</ldap:oid>
+ <ldap:name>ds-cfg-max-memory-size</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property name="max-entries">
+ <adm:synopsis>
+ The maximum number of entries allowed in the cache.
+ </adm:synopsis>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>2147483647</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:integer lower-limit="0"/>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.65</ldap:oid>
+ <ldap:name>ds-cfg-max-entries</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property name="cache-type">
+ <adm:synopsis>
+      Specifies the policy used for purging entries from the
+      cache: FIFO (the default) or LRU.
+ </adm:synopsis>
+ <adm:requires-admin-action>
+ <adm:component-restart/>
+ </adm:requires-admin-action>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>fifo</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:enumeration>
+ <adm:value name="fifo">
+ <adm:synopsis>
+ FIFO based entry cache.
+ </adm:synopsis>
+ </adm:value>
+ <adm:value name="lru">
+ <adm:synopsis>
+ LRU based entry cache.
+ </adm:synopsis>
+ </adm:value>
+ </adm:enumeration>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.363</ldap:oid>
+ <ldap:name>ds-cfg-cache-type</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property name="cache-directory">
+ <adm:synopsis>
+ Specifies the directory in which the JE environment should store the
+ cache.
+ </adm:synopsis>
+ <adm:requires-admin-action>
+ <adm:component-restart/>
+ </adm:requires-admin-action>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>/tmp/OpenDS.FSCache</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:string/>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.364</ldap:oid>
+ <ldap:name>ds-cfg-cache-directory</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property name="persistent-cache">
+ <adm:synopsis>
+ Specifies whether the cache should persist across restarts.
+ </adm:synopsis>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>false</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:boolean/>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.365</ldap:oid>
+ <ldap:name>ds-cfg-persistent-cache</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property name="database-cache-percent">
+ <adm:synopsis>
+ The maximum memory usage for the internal JE cache as a percentage
+ of the total JVM memory.
+ </adm:synopsis>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>0</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:integer lower-limit="0" upper-limit="100"/>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.25</ldap:oid>
+ <ldap:name>ds-cfg-database-cache-percent</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property name="database-cache-size">
+ <adm:synopsis>
+ The maximum JVM memory usage in bytes for the internal JE cache.
+ </adm:synopsis>
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>0b</adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ <adm:syntax>
+ <adm:size/>
+ </adm:syntax>
+ <adm:profile name="ldap">
+ <ldap:attribute>
+ <ldap:oid>1.3.6.1.4.1.26027.1.1.27</ldap:oid>
+ <ldap:name>ds-cfg-database-cache-size</ldap:name>
+ </ldap:attribute>
+ </adm:profile>
+ </adm:property>
+
+ <adm:property-reference name="include-filter"/>
+ <adm:property-reference name="exclude-filter"/>
+
+ <adm:property-override name="entry-cache-class">
+ <adm:default-behavior>
+ <adm:defined>
+ <adm:value>
+ org.opends.server.extensions.FileSystemEntryCache
+ </adm:value>
+ </adm:defined>
+ </adm:default-behavior>
+ </adm:property-override>
+
+</adm:managed-object>
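
The sizing policy described above reduces to a simple predicate. The
following minimal sketch (hypothetical names and free-space figure, not
part of this patch) illustrates when adding an entry forces a purge:

    // Minimal sketch of the sizing policy described above; the names and
    // the free-space query are illustrative assumptions only.
    final class CacheSizingSketch {
      static boolean purgeNeeded(long fsFreeBytes, long entryBytes,
                                 long cachedEntries, long maxEntries) {
        // An entry can be added without purging only while the file system
        // still has room for it and the entry limit has not been reached.
        return fsFreeBytes < entryBytes || cachedEntries >= maxEntries;
      }
    }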
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/jeb/BackendImpl.java b/opendj-sdk/opends/src/server/org/opends/server/backends/jeb/BackendImpl.java
index c703fdd..667a514 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/backends/jeb/BackendImpl.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/jeb/BackendImpl.java
@@ -32,6 +32,14 @@
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FilenameFilter;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.zip.Adler32;
+import java.util.zip.CheckedInputStream;
+
import com.sleepycat.je.DatabaseException;
import org.opends.server.api.Backend;
@@ -185,6 +193,60 @@
}
}
+ /**
+ * This method will attempt to checksum the current JE db environment by
+ * computing the Adler-32 checksum on the latest JE log file available.
+ *
+   * @return The checksum of the JE db environment, or zero if the
+   *         checksum failed.
+ */
+ private long checksumDbEnv() {
+
+    File[] files = config.getBackendDirectory().listFiles(
+        new FilenameFilter() {
+          public boolean accept(File dir, String name) {
+            return name.endsWith(".jdb");
+          }
+        });
+
+    // listFiles() returns null if the backend directory does not exist.
+    List<File> jdbFiles = (files == null)
+        ? Collections.<File>emptyList() : Arrays.asList(files);
+
+ if ( !jdbFiles.isEmpty() ) {
+ Collections.sort(jdbFiles, Collections.reverseOrder());
+ FileInputStream fis = null;
+ try {
+ fis = new FileInputStream(jdbFiles.get(0).toString());
+ CheckedInputStream cis = new CheckedInputStream(fis, new Adler32());
+ byte[] tempBuf = new byte[8192];
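+        // Read the file to the end; the Adler-32 checksum accumulates as
+        // a side effect of reading through the checked stream.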
+ while (cis.read(tempBuf) >= 0) {
+ }
+
+ return cis.getChecksum().getValue();
+ } catch (Exception e) {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ } finally {
+ if (fis != null) {
+ try {
+ fis.close();
+ } catch (Exception e) {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ }
+ }
+ }
+ }
+
+ // Log a warning that we have failed to checksum this db environment.
+ int msgID = MSGID_JEB_BACKEND_CHECKSUM_FAIL;
+    String message = getMessage(msgID, getBackendID());
+ logError(ErrorLogCategory.BACKEND, ErrorLogSeverity.SEVERE_WARNING,
+ message, msgID);
+
+ return 0;
+ }
+
/**
* This method constructs a container name from a base DN. Only alphanumeric
* characters are preserved, all other characters are replaced with an
@@ -239,9 +301,12 @@
public void initializeBackend()
throws ConfigException, InitializationException
{
+ // Checksum this db environment and register its offline state id/checksum.
+ DirectoryServer.registerOfflineBackendStateID(this.getBackendID(),
+ checksumDbEnv());
+
// Open the database environment
- try
- {
+ try {
rootContainer = new RootContainer(config, this);
rootContainer.open();
}
@@ -391,6 +456,10 @@
message, msgID);
}
+ // Checksum this db environment and register its offline state id/checksum.
+ DirectoryServer.registerOfflineBackendStateID(this.getBackendID(),
+ checksumDbEnv());
+
// Make sure the thread counts are zero for next initialization.
threadTotalCount.set(0);
threadWriteCount.set(0);
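
The reverse sort in checksumDbEnv() works because JE names its log files
with increasing, zero-padded hexadecimal numbers, so reverse lexicographic
order is also reverse chronological order. A small standalone illustration
(the file names are examples):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    // Demonstrates why reverse-sorting ".jdb" names yields the newest file.
    final class JdbSortSketch {
      public static void main(String[] args) {
        List<String> files =
            Arrays.asList("00000000.jdb", "00000002.jdb", "00000001.jdb");
        Collections.sort(files, Collections.reverseOrder());
        System.out.println(files.get(0)); // prints "00000002.jdb"
      }
    }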
diff --git a/opendj-sdk/opends/src/server/org/opends/server/config/ConfigConstants.java b/opendj-sdk/opends/src/server/org/opends/server/config/ConfigConstants.java
index 343d7a3..e97af04 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/config/ConfigConstants.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/config/ConfigConstants.java
@@ -766,6 +766,118 @@
public static final int DEFAULT_FIFOCACHE_MAX_MEMORY_PCT = 90;
+ /**
+ * The name of the configuration attribute that contains a set of search
+ * filters to use to determine which entries should be excluded from the
+ * cache.
+ */
+ public static final String ATTR_FSCACHE_EXCLUDE_FILTER =
+ NAME_PREFIX_CFG + "exclude-filter";
+
+ /**
+ * The name of the configuration attribute that contains a set of search
+ * filters to use to determine which entries should be included in the cache.
+ */
+ public static final String ATTR_FSCACHE_INCLUDE_FILTER =
+ NAME_PREFIX_CFG + "include-filter";
+
+ /**
+ * The name of the configuration attribute that indicates the maximum length
+ * of time in milliseconds to spend trying to acquire a lock for an entry in
+ * the cache.
+ */
+ public static final String ATTR_FSCACHE_LOCK_TIMEOUT =
+ NAME_PREFIX_CFG + "lock-timeout";
+
+ /**
+ * The default value for the entry cache lockout timeout that will be used if
+ * no other value is specified.
+ */
+ public static final long DEFAULT_FSCACHE_LOCK_TIMEOUT = 2000L;
+
+ /**
+ * The name of the configuration attribute that indicates the maximum number
+ * of entries that the FIFO entry cache will be allowed to hold.
+ */
+ public static final String ATTR_FSCACHE_MAX_ENTRIES =
+ NAME_PREFIX_CFG + "max-entries";
+
+ /**
+ * The default value for the entry cache max entries that will be used if no
+ * other value is specified.
+ */
+ public static final long DEFAULT_FSCACHE_MAX_ENTRIES = Long.MAX_VALUE;
+
+ /**
+ * The name of the configuration attribute that indicates the maximum
+ * memory size of the FS entry cache.
+ */
+ public static final String ATTR_FSCACHE_MAX_MEMORY_SIZE =
+ NAME_PREFIX_CFG + "max-memory-size";
+
+ /**
+ * The name of the configuration attribute that specifies the entry cache JE
+ * environment home.
+ */
+ public static final String ATTR_FSCACHE_HOME =
+ NAME_PREFIX_CFG + "cache-directory";
+
+ /**
+ * The default value for the entry cache JE environment home that will be used
+ * if no other value is specified.
+ */
+ public static final String DEFAULT_FSCACHE_HOME = "/tmp/OpenDS.FSCache";
+
+ /**
+ * The name of the configuration attribute that indicates the maximum
+ * available space in bytes in the file system that JE cache will be
+ * allowed to consume.
+ */
+ public static final String ATTR_FSCACHE_JE_CACHE_SIZE =
+ NAME_PREFIX_CFG + "database-cache-size";
+
+ /**
+ * The default value for the JE cache size in bytes that will be used
+ * if no other value is specified.
+ */
+ public static final long DEFAULT_FSCACHE_JE_CACHE_SIZE = 0;
+
+ /**
+ * The name of the configuration attribute that indicates the maximum
+ * available memory percent that JE cache can consume.
+ */
+ public static final String ATTR_FSCACHE_JE_CACHE_PCT =
+ NAME_PREFIX_CFG + "database-cache-percent";
+
+ /**
+ * The default value for the JE cache size percent that will be used
+ * if no other value is specified.
+ */
+ public static final int DEFAULT_FSCACHE_JE_CACHE_PCT = 0;
+
+ /**
+ * The name of the configuration attribute that indicates whether
+ * file system entry cache is configured as persistent or not.
+ */
+ public static final String ATTR_FSCACHE_IS_PERSISTENT =
+ NAME_PREFIX_CFG + "persistent-cache";
+
+ /**
+ * The default value to indicate whether the cache is persistent or not.
+ */
+ public static final boolean DEFAULT_FSCACHE_IS_PERSISTENT = false;
+
+ /**
+ * The default value to indicate which cache type to use.
+ */
+ public static final String DEFAULT_FSCACHE_TYPE = "FIFO";
+
+ /**
+ * The name of the configuration attribute that indicates which
+ * cache type will be used.
+ */
+ public static final String ATTR_FSCACHE_TYPE =
+ NAME_PREFIX_CFG + "cache-type";
/**
* The name of the configuration attribute that specifies the fully-qualified
diff --git a/opendj-sdk/opends/src/server/org/opends/server/core/DirectoryServer.java b/opendj-sdk/opends/src/server/org/opends/server/core/DirectoryServer.java
index 68c80a8..4735728 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/core/DirectoryServer.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/core/DirectoryServer.java
@@ -540,6 +540,11 @@
// The set of backends registered with the server.
private TreeMap<String,Backend> backends;
+  // The mapping between backends and the unique identifiers of their
+  // offline state, representing a checksum or other unique value used
+  // to detect any offline modifications to a given backend.
+ private ConcurrentHashMap<String,Long> offlineBackendsStateIDs;
+
// The set of supported controls registered with the Directory Server.
private TreeSet<String> supportedControls;
@@ -662,6 +667,8 @@
directoryServer.monitorProviders =
new ConcurrentHashMap<String,MonitorProvider>();
directoryServer.backends = new TreeMap<String,Backend>();
+ directoryServer.offlineBackendsStateIDs =
+ new ConcurrentHashMap<String,Long>();
directoryServer.backendInitializationListeners =
new CopyOnWriteArraySet<BackendInitializationListener>();
directoryServer.baseDNs = new TreeMap<DN,Backend>();
@@ -1060,11 +1067,6 @@
initializeAlertHandlers();
- // Initialize the entry cache.
- entryCacheConfigManager = new EntryCacheConfigManager();
- entryCacheConfigManager.initializeEntryCache();
-
-
// Initialize the key manager provider.
keyManagerProviderConfigManager = new KeyManagerProviderConfigManager();
keyManagerProviderConfigManager.initializeKeyManagerProviders();
@@ -1099,6 +1101,13 @@
// Initialize all the backends and their associated suffixes.
initializeBackends();
+ // Initialize the entry cache.
+ entryCacheConfigManager = new EntryCacheConfigManager();
+ entryCacheConfigManager.initializeEntryCache();
+
+ // Reset the map as we can no longer guarantee offline state.
+ directoryServer.offlineBackendsStateIDs.clear();
+
// Register the supported controls and supported features.
initializeSupportedControls();
initializeSupportedFeatures();
@@ -6018,6 +6027,37 @@
/**
+   * This method returns a map that contains a unique offline state id,
+   * such as a checksum, for every server backend that has registered one.
+   *
+   * @return An unmodifiable <CODE>Map</CODE> from backend ID to its
+   *         offline state checksum.
+ */
+ public static Map<String,Long> getOfflineBackendsStateIDs() {
+ return Collections.unmodifiableMap(directoryServer.offlineBackendsStateIDs);
+ }
+
+
+
+ /**
+   * This method allows any server backend to register its unique offline
+   * state, such as a checksum, in a global map that other server components
+   * can consult to determine whether any changes were made to a given
+   * backend while it was offline.
+   *
+   * @param backend The backend ID, as returned by the
+   *                <CODE>getBackendID()</CODE> method.
+   *
+   * @param id The unique offline state identifier, such as a checksum.
+ */
+ public static void registerOfflineBackendStateID(String backend, long id) {
+ // Zero means failed checksum so just skip it.
+ if (id != 0) {
+ directoryServer.offlineBackendsStateIDs.put(backend, id);
+ }
+ }
+
+
+
+ /**
* Retrieves the entire set of base DNs registered with the Directory Server,
* mapped from the base DN to the backend responsible for that base DN. The
* same backend may be present multiple times, mapped from different base DNs.
@@ -8111,6 +8151,9 @@
}
}
+ // Finalize the entry cache.
+ DirectoryServer.getEntryCache().finalizeEntryCache();
+
// Release the exclusive lock for the Directory Server process.
String lockFile = LockFileManager.getServerLockFileName();
try
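
A hypothetical consumer of the offline-state registry added above (the
class and method names are illustrative only):

    import org.opends.server.core.DirectoryServer;

    final class OfflineStateSketch {
      /**
       * Returns true if the given backend's registered offline state
       * checksum matches the one saved before the previous shutdown.
       */
      static boolean backendUnchanged(String backendID, long savedChecksum) {
        // Zero is reserved for a failed checksum, so it is never registered
        // and therefore never matches.
        Long current =
            DirectoryServer.getOfflineBackendsStateIDs().get(backendID);
        return current != null && current.longValue() == savedChecksum;
      }
    }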
diff --git a/opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCache.java b/opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCache.java
new file mode 100644
index 0000000..bbdacb0
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCache.java
@@ -0,0 +1,2363 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE. If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ * Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Portions Copyright 2007 Sun Microsystems, Inc.
+ */
+package org.opends.server.extensions;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.locks.Lock;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.SortedMap;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.atomic.AtomicLong;
+import java.io.UnsupportedEncodingException;
+import java.io.File;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseNotFoundException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.cleaner.UtilizationProfile;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import org.opends.server.api.Backend;
+import org.opends.server.api.EntryCache;
+import org.opends.server.admin.std.server.FileSystemEntryCacheCfg;
+import org.opends.server.admin.server.ConfigurationChangeListener;
+import org.opends.server.config.ConfigException;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.protocols.asn1.ASN1Element;
+import org.opends.server.protocols.asn1.ASN1OctetString;
+import org.opends.server.types.Attribute;
+import org.opends.server.types.AttributeType;
+import org.opends.server.types.AttributeValue;
+import org.opends.server.types.ConfigChangeResult;
+import org.opends.server.types.DN;
+import org.opends.server.types.Entry;
+import org.opends.server.types.ErrorLogCategory;
+import org.opends.server.types.ErrorLogSeverity;
+import org.opends.server.types.InitializationException;
+import org.opends.server.types.LockType;
+import org.opends.server.types.ResultCode;
+import org.opends.server.types.SearchFilter;
+import org.opends.server.types.FilePermission;
+import org.opends.server.types.LockManager;
+import org.opends.server.types.ObjectClass;
+import org.opends.server.types.DebugLogLevel;
+import org.opends.server.loggers.debug.DebugTracer;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.loggers.ErrorLogger.logError;
+import static org.opends.server.config.ConfigConstants.*;
+import static org.opends.server.messages.ExtensionsMessages.*;
+import static org.opends.server.messages.MessageHandler.*;
+import static org.opends.server.util.ServerConstants.*;
+import static org.opends.server.util.StaticUtils.*;
+
+/**
+ * This class defines a Directory Server entry cache that uses a JE database
+ * to keep track of the entries. It is intended for use with a JE database
+ * residing in a memory-based file system, which has obvious performance
+ * benefits, although any file system will suffice for this cache to
+ * function. Entries are maintained in either a FIFO (default) or an LRU
+ * (configurable) ordered list.
+ * <BR><BR>
+ * Cache sizing is based on the size or percentage of free space available
+ * in the file system: if enough space is free, adding an entry to the cache
+ * requires no purging, but once more than a specified percentage of the
+ * available file system space has been consumed, one or more entries must
+ * be removed to make room for a new entry. It is also possible to configure
+ * a maximum number of entries for the cache. If this is specified, the
+ * number of entries is not allowed to exceed this value, but it may not be
+ * possible to hold this many entries if the available memory fills up
+ * first.
+ * <BR><BR>
+ * Other configurable parameters for this cache include the maximum length of
+ * time to block while waiting to acquire a lock, and a set of filters that may
+ * be used to define criteria for determining which entries are stored in the
+ * cache. If a filter list is provided, then only entries matching at least
+ * one of the given filters will be stored in the cache.
+ * <BR><BR>
+ * The JE environment cache size can also be configured, either as a
+ * percentage of the free memory available in the JVM or as an explicit
+ * size in bytes.
+ * <BR><BR>
+ * This cache has a persistence property which, if enabled, allows the
+ * contents of the cache to persist across server or cache restarts.
+ */
+public class FileSystemEntryCache
+ extends EntryCache <FileSystemEntryCacheCfg>
+ implements ConfigurationChangeListener <FileSystemEntryCacheCfg> {
+ /**
+ * The tracer object for the debug logger.
+ */
+ private static final DebugTracer TRACER = getTracer();
+
+ /**
+   * The set of time units and their multipliers used when parsing
+   * configuration attributes that represent a duration.
+ */
+ private static final LinkedHashMap<String, Double> timeUnits =
+ new LinkedHashMap<String, Double>();
+
+ /**
+ * The set of units and their multipliers for configuration attributes
+ * representing a number of bytes.
+ */
+ private static HashMap<String, Double> memoryUnits =
+ new HashMap<String, Double>();
+
+ // Permissions for cache db environment.
+ private static final FilePermission CACHE_HOME_PERMISSIONS =
+ new FilePermission(0700);
+
+ // The DN of the configuration entry for this entry cache.
+ private DN configEntryDN;
+
+ // The set of filters that define the entries that should be excluded from the
+ // cache.
+ private Set<SearchFilter> excludeFilters;
+
+ // The set of filters that define the entries that should be included in the
+ // cache.
+ private Set<SearchFilter> includeFilters;
+
+ // The maximum amount of space in bytes that can be consumed in the filesystem
+ // before we need to start purging entries.
+ private long maxAllowedMemory;
+
+  // The maximum number of entries that may be held in the cache.
+  // Atomic for additional safety and in case we decide to push
+  // some locks further down later. On most modern platforms this
+  // incurs no extra blocking overhead, since atomic operations are
+  // implemented with CPU instructions rather than synchronization
+  // primitives.
+ private AtomicLong maxEntries;
+
+ // The maximum percentage of memory dedicated to JE cache.
+ private int jeCachePercent;
+
+ // The maximum amount of memory in bytes dedicated to JE cache.
+ private long jeCacheSize;
+
+ // The entry cache home folder to host db environment.
+ private String cacheHome;
+
+ // The type of this cache.
+ // It can be either FIFO (default) or LRU (configurable).
+ private String cacheType;
+
+ // This regulates whether we persist the cache across restarts or not.
+ private boolean persistentCache;
+
+  // The lock used to provide thread-safe access when changing the contents
+  // of the cache maps.
+ private ReentrantReadWriteLock cacheLock;
+ private Lock cacheReadLock;
+ private Lock cacheWriteLock;
+
+ // The maximum length of time to try to obtain a lock before giving up.
+ private long lockTimeout;
+
+ // The mapping between DNs and IDs. This is the main index map for this
+ // cache, keyed to the underlying JE database where entries are stored.
+ private Map<DN,Long> dnMap;
+
+  // The mapping between entry backends/IDs and DNs, used to identify all
+  // entries that belong to a given backend, since an entry ID is only
+  // unique within a backend.
+ private Map<Backend,Map<Long,DN>> backendMap;
+
+ // Access order for this cache. FIFO by default.
+ boolean accessOrder = false;
+
+ // JE environment and database related fields for this cache.
+ private Environment entryCacheEnv;
+ private EnvironmentConfig entryCacheEnvConfig;
+ private EnvironmentMutableConfig entryCacheEnvMutableConfig;
+ private DatabaseConfig entryCacheDBConfig;
+
+ // The main entry cache database.
+ private Database entryCacheDB;
+
+ // Class database, catalog and binding for serialization.
+ private Database entryCacheClassDB;
+ private StoredClassCatalog classCatalog;
+ private EntryBinding entryCacheDataBinding;
+
+ // JE naming constants.
+ private static final String ENTRYCACHEDBNAME = "EntryCacheDB";
+ private static final String INDEXCLASSDBNAME = "IndexClassDB";
+ private static final String INDEXKEY = "EntryCacheIndex";
+
+ // JE config constants.
+  // TODO: All hardcoded for now, but we need to use a common
+  // ds-cfg-je-property-like multi-valued attribute for this; see Issue 1481.
+ private static final Long JEBYTESINTERVAL = 10485760L;
+ private static final Long JELOGFILEMAX = 10485760L;
+ private static final Integer JEMINFILEUTILIZATION = 50;
+ private static final Integer JEMINUTILIZATION = 90;
+ private static final Integer JEMAXBATCHFILES = 1;
+ private static final Integer JEMINAGE = 1;
+
+ // The number of milliseconds between persistent state save/restore
+ // progress reports.
+ private long progressInterval = 5000;
+
+ // Persistent state save/restore progress report counters.
+ private long persistentEntriesSaved = 0;
+ private long persistentEntriesRestored = 0;
+
+ /**
+ * Creates a new instance of this entry cache.
+ */
+ public FileSystemEntryCache() {
+ super();
+    // All initialization should be performed in initializeEntryCache().
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void initializeEntryCache(FileSystemEntryCacheCfg configuration)
+ throws ConfigException, InitializationException {
+
+ configuration.addFileSystemChangeListener (this);
+ configEntryDN = configuration.dn();
+
+ // Read and apply configuration.
+ boolean applyChanges = true;
+ EntryCacheCommon.ConfigErrorHandler errorHandler =
+ EntryCacheCommon.getConfigErrorHandler (
+ EntryCacheCommon.ConfigPhase.PHASE_INIT, null, null
+ );
+ processEntryCacheConfig(configuration, applyChanges, errorHandler);
+
+ // Set the cache type.
+ if (cacheType.equalsIgnoreCase("LRU")) {
+ accessOrder = true;
+ } else {
+      // The admin framework should only allow FIFO or LRU, but we reset
+      // the type to the default here explicitly if it is not LRU.
+ cacheType = DEFAULT_FSCACHE_TYPE;
+ accessOrder = false;
+ }
+
+ // Initialize the cache maps and locks.
+ backendMap = new LinkedHashMap<Backend,Map<Long,DN>>();
+    dnMap = new LinkedHashMapRotator<DN,Long>(16, 0.75f, accessOrder);
+
+ cacheLock = new ReentrantReadWriteLock();
+ if (accessOrder) {
+ // In access-ordered linked hash maps, merely querying the map
+ // with get() is a structural modification.
+ cacheReadLock = cacheLock.writeLock();
+ } else {
+ cacheReadLock = cacheLock.readLock();
+ }
+ cacheWriteLock = cacheLock.writeLock();
+
+ // Setup the cache home.
+ try {
+ checkAndSetupCacheHome(cacheHome);
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // Log an error message.
+ logError(ErrorLogCategory.CONFIGURATION,
+ ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_INVALID_HOME,
+ String.valueOf(configEntryDN), stackTraceToSingleLineString(e),
+ cacheHome, DEFAULT_FSCACHE_HOME);
+
+ // User specified home is no good, reset to default.
+ cacheHome = DEFAULT_FSCACHE_HOME;
+
+ // Try again.
+ try {
+ checkAndSetupCacheHome(cacheHome);
+ } catch (Exception e2) {
+ // Not having any home for the cache db environment at this point is a
+ // fatal error as we are unable to continue any further without it.
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e2);
+ }
+
+ int msgID = MSGID_FSCACHE_HOMELESS;
+ String message = getMessage(msgID, stackTraceToSingleLineString(e2));
+ throw new InitializationException(msgID, message, e2);
+ }
+ }
+
+ // Open JE environment and cache database.
+ try {
+ entryCacheEnvConfig = new EnvironmentConfig();
+
+      // All these environment properties are cranked up to their extreme
+      // values, either max or min, to get the smallest possible space
+      // consumption, which translates into memory consumption for
+      // memory-based file systems. This hurts performance somewhat but
+      // preserves memory to a much greater extent.
+ //
+ // TODO: All these options should be configurable, see Issue 1481.
+ //
+ entryCacheEnvConfig.setConfigParam("je.log.fileMax",
+ JELOGFILEMAX.toString());
+ entryCacheEnvConfig.setConfigParam("je.cleaner.minUtilization",
+ JEMINUTILIZATION.toString());
+ entryCacheEnvConfig.setConfigParam("je.cleaner.maxBatchFiles",
+ JEMAXBATCHFILES.toString());
+ entryCacheEnvConfig.setConfigParam("je.cleaner.minAge",
+ JEMINAGE.toString());
+ entryCacheEnvConfig.setConfigParam("je.cleaner.minFileUtilization",
+ JEMINFILEUTILIZATION.toString());
+ entryCacheEnvConfig.setConfigParam("je.checkpointer.bytesInterval",
+ JEBYTESINTERVAL.toString());
+
+ entryCacheEnvConfig.setAllowCreate(true);
+ entryCacheEnv = new Environment(new File(cacheHome), entryCacheEnvConfig);
+
+ // Set JE cache percent and size where the size value will prevail if set.
+ entryCacheEnvMutableConfig = new EnvironmentMutableConfig();
+ if (jeCachePercent != 0) {
+ try {
+ entryCacheEnvMutableConfig.setCachePercent(jeCachePercent);
+ } catch (IllegalArgumentException e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+          // It's safe to ignore this and continue: JE will use its default
+          // value instead. However, we have to let the user know about it,
+          // so log an error message.
+ logError(ErrorLogCategory.CONFIGURATION,
+ ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_PCT,
+ String.valueOf(configEntryDN), stackTraceToSingleLineString(e));
+ }
+ }
+ if (jeCacheSize != 0) {
+ try {
+ entryCacheEnvMutableConfig.setCacheSize(jeCacheSize);
+ } catch (IllegalArgumentException e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+          // It's safe to ignore this and continue: JE will use its default
+          // value instead. However, we have to let the user know about it,
+          // so log an error message.
+ logError(ErrorLogCategory.CONFIGURATION,
+ ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_SIZE,
+ String.valueOf(configEntryDN), stackTraceToSingleLineString(e));
+ }
+ }
+
+ entryCacheEnv.setMutableConfig(entryCacheEnvMutableConfig);
+ entryCacheDBConfig = new DatabaseConfig();
+ entryCacheDBConfig.setAllowCreate(true);
+
+ // Remove old cache databases if this cache is not persistent.
+ if ( !persistentCache ) {
+ try {
+ entryCacheEnv.removeDatabase(null, INDEXCLASSDBNAME);
+ } catch (DatabaseNotFoundException e) {}
+ try {
+ entryCacheEnv.removeDatabase(null, ENTRYCACHEDBNAME);
+ } catch (DatabaseNotFoundException e) {}
+ }
+
+ entryCacheDB = entryCacheEnv.openDatabase(null,
+ ENTRYCACHEDBNAME, entryCacheDBConfig);
+ entryCacheClassDB =
+ entryCacheEnv.openDatabase(null, INDEXCLASSDBNAME, entryCacheDBConfig);
+ // Instantiate the class catalog
+ classCatalog = new StoredClassCatalog(entryCacheClassDB);
+ entryCacheDataBinding =
+ new SerialBinding(classCatalog, FileSystemEntryCacheIndex.class);
+
+      // Restoration is static and not subject to the current configuration
+      // constraints, so that the persistent state is truly preserved and
+      // restored to the exact state it was in when the cache was made
+      // persistent. The only exception is backend offline state matching:
+      // entries that belong to a backend whose offline state cannot be
+      // matched are discarded from the cache.
+ if ( persistentCache ) {
+ // Retrieve cache index.
+ try {
+ FileSystemEntryCacheIndex entryCacheIndex;
+ DatabaseEntry indexData = new DatabaseEntry();
+ DatabaseEntry indexKey = new DatabaseEntry(
+ INDEXKEY.getBytes("UTF-8"));
+
+ if (OperationStatus.SUCCESS ==
+ entryCacheDB.get(null, indexKey, indexData, LockMode.DEFAULT)) {
+ entryCacheIndex =
+ (FileSystemEntryCacheIndex)
+ entryCacheDataBinding.entryToObject(indexData);
+ } else {
+ throw new CacheIndexNotFoundException();
+ }
+ // Check cache index state.
+ if ((entryCacheIndex.dnMap.isEmpty()) ||
+ (entryCacheIndex.backendMap.isEmpty()) ||
+ (entryCacheIndex.offlineState.isEmpty())) {
+ throw new CacheIndexImpairedException();
+ } else {
+ // Restore entry cache maps from this index.
+
+          // Push the maxEntries value and make the cache unlimited until
+          // restoration completes. Save the value itself, not the
+          // AtomicLong reference, so it survives the temporary change.
+          long currentMaxEntries = maxEntries.get();
+          maxEntries.set(DEFAULT_FSCACHE_MAX_ENTRIES);
+
+ // Convert cache index maps to entry cache maps.
+ Set<String> backendSet = entryCacheIndex.backendMap.keySet();
+ Iterator<String> backendIterator = backendSet.iterator();
+
+ // Start a timer for the progress report.
+ final long persistentEntriesTotal = entryCacheIndex.dnMap.size();
+ Timer timer = new Timer();
+ TimerTask progressTask = new TimerTask() {
+ // Persistent state restore progress report.
+ public void run() {
+ if ((persistentEntriesRestored > 0) &&
+ (persistentEntriesRestored < persistentEntriesTotal)) {
+ int msgID = MSGID_FSCACHE_RESTORE_PROGRESS_REPORT;
+ String message = getMessage(msgID, persistentEntriesRestored,
+ persistentEntriesTotal);
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.NOTICE,
+ message, msgID);
+ }
+ }
+ };
+ timer.scheduleAtFixedRate(progressTask, progressInterval,
+ progressInterval);
+ try {
+ while (backendIterator.hasNext()) {
+ String backend = backendIterator.next();
+ Map<Long,String> entriesMap =
+ entryCacheIndex.backendMap.get(backend);
+ Set<Long> entriesSet = entriesMap.keySet();
+ Iterator<Long> entriesIterator = entriesSet.iterator();
+ LinkedHashMap<Long,DN> entryMap = new LinkedHashMap<Long,DN>();
+ while (entriesIterator.hasNext()) {
+ Long entryID = entriesIterator.next();
+ String entryStringDN = entriesMap.get(entryID);
+ DN entryDN = DN.decode(entryStringDN);
+ dnMap.put(entryDN, entryID);
+ entryMap.put(entryID, entryDN);
+ persistentEntriesRestored++;
+ }
+ backendMap.put(DirectoryServer.getBackend(backend), entryMap);
+ }
+ } finally {
+ // Stop persistent state restore progress report timer.
+ timer.cancel();
+
+ // Final persistent state restore progress report.
+ int msgID = MSGID_FSCACHE_RESTORE_PROGRESS_REPORT;
+ String message = getMessage(msgID, persistentEntriesRestored,
+ persistentEntriesTotal);
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.NOTICE,
+ message, msgID);
+ }
+
+ // Compare last known offline states to offline states on startup.
+ Map<String,Long> currentBackendsState =
+ DirectoryServer.getOfflineBackendsStateIDs();
+ Set<String> offlineBackendSet =
+ entryCacheIndex.offlineState.keySet();
+ Iterator<String> offlineBackendIterator =
+ offlineBackendSet.iterator();
+ while (offlineBackendIterator.hasNext()) {
+ String backend = offlineBackendIterator.next();
+ Long offlineId = entryCacheIndex.offlineState.get(backend);
+ Long currentId = currentBackendsState.get(backend);
+ if ( !(offlineId.equals(currentId)) ) {
+ // Remove cache entries specific to this backend.
+ clearBackend(DirectoryServer.getBackend(backend));
+ // Log an error message.
+ logError(ErrorLogCategory.EXTENSIONS,
+ ErrorLogSeverity.SEVERE_WARNING,
+ MSGID_FSCACHE_OFFLINE_STATE_FAIL,
+ backend);
+ }
+ }
+          // Pop the saved max entries limit.
+          maxEntries.set(currentMaxEntries);
+ }
+ } catch (CacheIndexNotFoundException e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // Log an error message.
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.NOTICE,
+ MSGID_FSCACHE_INDEX_NOT_FOUND);
+
+ // Clear the entry cache.
+ clear();
+ } catch (CacheIndexImpairedException e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // Log an error message.
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_INDEX_IMPAIRED);
+
+ // Clear the entry cache.
+ clear();
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // Log an error message.
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_CANNOT_LOAD_PERSISTENT_DATA,
+ stackTraceToSingleLineString(e));
+
+ // Clear the entry cache.
+ clear();
+ }
+ }
+ } catch (Exception e) {
+      // If we got here it means we have failed to set up a proper backing
+      // store for this entry cache and there is no point in going any
+      // further.
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ int msgID = MSGID_FSCACHE_CANNOT_INITIALIZE;
+ String message = getMessage(msgID, stackTraceToSingleLineString(e));
+ throw new InitializationException(msgID, message, e);
+ }
+
+ }
+
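+  // Note on the LRU mode configured above: in an access-ordered
+  // LinkedHashMap even get() is a structural modification, which is why
+  // reads must take the write lock. A minimal, hypothetical LRU sketch
+  // (illustration only, independent of the LinkedHashMapRotator used
+  // above):
+  private static final class LruSketch<K,V> extends LinkedHashMap<K,V> {
+    private final int limit;
+    LruSketch(int limit) {
+      // accessOrder=true reorders entries on every get().
+      super(16, 0.75f, true);
+      this.limit = limit;
+    }
+    protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
+      // Evict the least recently used entry once the limit is exceeded.
+      return size() > limit;
+    }
+  }
+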
+ /**
+ * {@inheritDoc}
+ */
+ public void finalizeEntryCache() {
+
+ cacheWriteLock.lock();
+
+    // Store the index/maps in case of a persistent cache. Since the cache
+    // database already exists at this point, all we have to do is serialize
+    // the cache index maps, @see FileSystemEntryCacheIndex, and put them
+    // under the index key, allowing the index to be restored and the cache
+    // contents reused upon the next initialization.
+ if (persistentCache) {
+ FileSystemEntryCacheIndex entryCacheIndex =
+ new FileSystemEntryCacheIndex();
+ // There must be at least one backend at this stage.
+ entryCacheIndex.offlineState =
+ DirectoryServer.getOfflineBackendsStateIDs();
+
+ // Convert entry cache maps to serializable maps for the cache index.
+ Set<Backend> backendSet = backendMap.keySet();
+ Iterator<Backend> backendIterator = backendSet.iterator();
+
+ // Start a timer for the progress report.
+ final long persistentEntriesTotal = dnMap.size();
+ Timer timer = new Timer();
+ TimerTask progressTask = new TimerTask() {
+ // Persistent state save progress report.
+ public void run() {
+ if ((persistentEntriesSaved > 0) &&
+ (persistentEntriesSaved < persistentEntriesTotal)) {
+ int msgID = MSGID_FSCACHE_SAVE_PROGRESS_REPORT;
+ String message = getMessage(msgID, persistentEntriesSaved,
+ persistentEntriesTotal);
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.NOTICE,
+ message, msgID);
+ }
+ }
+ };
+ timer.scheduleAtFixedRate(progressTask, progressInterval,
+ progressInterval);
+
+ try {
+ while (backendIterator.hasNext()) {
+ Backend backend = backendIterator.next();
+ Map<Long,DN> entriesMap = backendMap.get(backend);
+ Map<Long,String> entryMap = new LinkedHashMap<Long,String>();
+ for (Long entryID : entriesMap.keySet()) {
+ DN entryDN = entriesMap.get(entryID);
+ entryCacheIndex.dnMap.put(entryDN.toNormalizedString(), entryID);
+ entryMap.put(entryID, entryDN.toNormalizedString());
+ persistentEntriesSaved++;
+ }
+ entryCacheIndex.backendMap.put(backend.getBackendID(), entryMap);
+ }
+ } finally {
+ // Stop persistent state save progress report timer.
+ timer.cancel();
+
+ // Final persistent state save progress report.
+ int msgID = MSGID_FSCACHE_SAVE_PROGRESS_REPORT;
+ String message = getMessage(msgID, persistentEntriesSaved,
+ persistentEntriesTotal);
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.NOTICE,
+ message, msgID);
+ }
+
+ // Store the index.
+ try {
+ DatabaseEntry indexData = new DatabaseEntry();
+ entryCacheDataBinding.objectToEntry(entryCacheIndex, indexData);
+ DatabaseEntry indexKey = new DatabaseEntry(INDEXKEY.getBytes("UTF-8"));
+ if (OperationStatus.SUCCESS !=
+ entryCacheDB.put(null, indexKey, indexData)) {
+        throw new Exception("Failed to store the entry cache index.");
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // Log an error message.
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_CANNOT_STORE_PERSISTENT_DATA,
+ stackTraceToSingleLineString(e));
+ }
+ }
+
+ // Close JE databases and environment and clear all the maps.
+ try {
+ backendMap.clear();
+ dnMap.clear();
+ if (entryCacheDB != null) {
+ entryCacheDB.close();
+ }
+ if (entryCacheClassDB != null) {
+ entryCacheClassDB.close();
+ }
+ if (entryCacheEnv != null) {
+ // Remove cache and index dbs if this cache is not persistent.
+ if ( !persistentCache ) {
+ try {
+ entryCacheEnv.removeDatabase(null, INDEXCLASSDBNAME);
+ } catch (DatabaseNotFoundException e) {}
+ try {
+ entryCacheEnv.removeDatabase(null, ENTRYCACHEDBNAME);
+ } catch (DatabaseNotFoundException e) {}
+ }
+ entryCacheEnv.cleanLog();
+ entryCacheEnv.close();
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+      // This is OK: JE verification and repair on startup should take care
+      // of it, and if any unrecoverable errors remain during the next
+      // startup that we cannot handle or clean up, we will log errors then.
+ } finally {
+ cacheWriteLock.unlock();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public boolean containsEntry(DN entryDN)
+ {
+ // Indicate whether the DN map contains the specified DN.
+ boolean containsEntry = false;
+ cacheReadLock.lock();
+ try {
+ containsEntry = dnMap.containsKey(entryDN);
+ } finally {
+ cacheReadLock.unlock();
+ }
+ return containsEntry;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Entry getEntry(DN entryDN) {
+ // Get the entry from the DN map if it is present. If not, then return
+ // null.
+ Entry entry = null;
+ cacheReadLock.lock();
+ try {
+ if (dnMap.containsKey(entryDN)) {
+ entry = getEntryFromDB(entryDN);
+ }
+ } finally {
+ cacheReadLock.unlock();
+ }
+ return entry;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public long getEntryID(DN entryDN) {
+ long entryID = -1;
+ cacheReadLock.lock();
+ try {
+ Long eid = dnMap.get(entryDN);
+ if (eid != null) {
+ entryID = eid.longValue();
+ }
+ } finally {
+ cacheReadLock.unlock();
+ }
+ return entryID;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Entry getEntry(DN entryDN, LockType lockType, List<Lock> lockList) {
+
+ Entry entry = getEntry(entryDN);
+ if (entry == null)
+ {
+ return null;
+ }
+
+ // Obtain a lock for the entry as appropriate. If an error occurs, then
+ // make sure no lock is held and return null. Otherwise, return the entry.
+ switch (lockType)
+ {
+ case READ:
+ // Try to obtain a read lock for the entry.
+ Lock readLock = LockManager.lockRead(entryDN, lockTimeout);
+ if (readLock == null)
+ {
+ // We couldn't get the lock, so we have to return null.
+ return null;
+ }
+ else
+ {
+ try
+ {
+ lockList.add(readLock);
+ return entry;
+ }
+ catch (Exception e)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // The attempt to add the lock to the list failed, so we need to
+ // release the lock and return null.
+ try
+ {
+ LockManager.unlock(entryDN, readLock);
+ }
+ catch (Exception e2)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e2);
+ }
+ }
+
+ return null;
+ }
+ }
+
+ case WRITE:
+ // Try to obtain a write lock for the entry.
+ Lock writeLock = LockManager.lockWrite(entryDN, lockTimeout);
+ if (writeLock == null)
+ {
+ // We couldn't get the lock, so we have to return null.
+ return null;
+ }
+ else
+ {
+ try
+ {
+ lockList.add(writeLock);
+ return entry;
+ }
+ catch (Exception e)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // The attempt to add the lock to the list failed, so we need to
+ // release the lock and return null.
+ try
+ {
+ LockManager.unlock(entryDN, writeLock);
+ }
+ catch (Exception e2)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e2);
+ }
+ }
+
+ return null;
+ }
+ }
+
+ case NONE:
+ // We don't need to obtain a lock, so just return the entry.
+ return entry;
+
+ default:
+ // This is an unknown type of lock, so we'll return null.
+ return null;
+ }
+ }
+
+ /**
+ * Retrieves the requested entry if it is present in the cache.
+ *
+ * @param backend The backend associated with the entry to retrieve.
+ * @param entryID The entry ID within the provided backend for the
+ * specified entry.
+ *
+ * @return The requested entry if it is present in the cache, or
+ * <CODE>null</CODE> if it is not present.
+ */
+ public Entry getEntry(Backend backend, long entryID) {
+
+ Entry entry = null;
+ cacheReadLock.lock();
+ try {
+ // Get the map for the provided backend. If it isn't present, then
+ // return null.
+      Map<Long,DN> map = backendMap.get(backend);
+      if (map != null) {
+        // Get the entry from the map by its ID. If it isn't present, then
+        // return null.
+        DN dn = map.get(entryID);
+        if (dn != null) {
+ if (dnMap.containsKey(dn)) {
+ entry = getEntryFromDB(dn);
+ }
+ }
+ }
+ } finally {
+ cacheReadLock.unlock();
+ }
+ return entry;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Entry getEntry(Backend backend, long entryID, LockType lockType,
+ List<Lock> lockList) {
+
+ Entry entry = getEntry(backend, entryID);
+ if (entry == null)
+ {
+ return null;
+ }
+
+ // Obtain a lock for the entry as appropriate. If an error occurs, then
+ // make sure no lock is held and return null. Otherwise, return the entry.
+ switch (lockType)
+ {
+ case READ:
+ // Try to obtain a read lock for the entry.
+ Lock readLock = LockManager.lockRead(entry.getDN(), lockTimeout);
+ if (readLock == null)
+ {
+ // We couldn't get the lock, so we have to return null.
+ return null;
+ }
+ else
+ {
+ try
+ {
+ lockList.add(readLock);
+ return entry;
+ }
+ catch (Exception e)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // The attempt to add the lock to the list failed, so we need to
+ // release the lock and return null.
+ try
+ {
+ LockManager.unlock(entry.getDN(), readLock);
+ }
+ catch (Exception e2)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e2);
+ }
+ }
+
+ return null;
+ }
+ }
+
+ case WRITE:
+ // Try to obtain a write lock for the entry.
+ Lock writeLock = LockManager.lockWrite(entry.getDN(), lockTimeout);
+ if (writeLock == null)
+ {
+ // We couldn't get the lock, so we have to return null.
+ return null;
+ }
+ else
+ {
+ try
+ {
+ lockList.add(writeLock);
+ return entry;
+ }
+ catch (Exception e)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // The attempt to add the lock to the list failed, so we need to
+ // release the lock and return null.
+ try
+ {
+ LockManager.unlock(entry.getDN(), writeLock);
+ }
+ catch (Exception e2)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e2);
+ }
+ }
+
+ return null;
+ }
+ }
+
+ case NONE:
+ // We don't need to obtain a lock, so just return the entry.
+ return entry;
+
+ default:
+ // This is an unknown type of lock, so we'll return null.
+ return null;
+ }
+ }
+
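+  // Hypothetical caller-side sketch for the locking getEntry() variants
+  // above; the method name is illustrative only. Any lock acquired on the
+  // caller's behalf is handed back through lockList and remains the
+  // caller's responsibility to release.
+  private Entry lockedGetSketch(DN entryDN, List<Lock> lockList) {
+    Entry entry = getEntry(entryDN, LockType.READ, lockList);
+    // On a cache miss, or if the read lock could not be acquired within
+    // lockTimeout, null is returned and nothing was added to lockList.
+    // Otherwise the caller must eventually call
+    // LockManager.unlock(entryDN, lock) for each lock in lockList.
+    return entry;
+  }
+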
+ /**
+ * {@inheritDoc}
+ */
+ public void putEntry(Entry entry, Backend backend, long entryID) {
+
+ // If there is a set of exclude filters, then make sure that the provided
+ // entry doesn't match any of them.
+ if (! excludeFilters.isEmpty()) {
+ for (SearchFilter f : excludeFilters) {
+ try {
+ if (f.matchesEntry(entry)) {
+ return;
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // This shouldn't happen, but if it does then we can't be sure whether
+ // the entry should be excluded, so we will by default.
+ return;
+ }
+ }
+ }
+
+ // If there is a set of include filters, then make sure that the provided
+ // entry matches at least one of them.
+ if (! includeFilters.isEmpty()) {
+ boolean matchFound = false;
+ for (SearchFilter f : includeFilters) {
+ try {
+ if (f.matchesEntry(entry)) {
+ matchFound = true;
+ break;
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // This shouldn't happen, but if it does, then just ignore it.
+ }
+ }
+
+ if (! matchFound) {
+ return;
+ }
+ }
+
+    // Obtain a lock on the cache. If this fails, then don't do anything.
+    // Note that the lock must only be released if it was actually acquired.
+    try {
+      if (!cacheWriteLock.tryLock(lockTimeout, TimeUnit.MILLISECONDS)) {
+        return;
+      }
+    } catch (Exception e) {
+      if (debugEnabled()) {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      return;
+    }
+
+    try {
+      putEntryToDB(entry, backend, entryID);
+    } catch (Exception e) {
+      if (debugEnabled()) {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+    } finally {
+      cacheWriteLock.unlock();
+    }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public boolean putEntryIfAbsent(Entry entry, Backend backend, long entryID)
+ {
+ // If there is a set of exclude filters, then make sure that the provided
+ // entry doesn't match any of them.
+ if (! excludeFilters.isEmpty()) {
+ for (SearchFilter f : excludeFilters) {
+ try {
+ if (f.matchesEntry(entry)) {
+ return true;
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // This shouldn't happen, but if it does then we can't be sure whether
+ // the entry should be excluded, so we will by default.
+ return false;
+ }
+ }
+ }
+
+ // If there is a set of include filters, then make sure that the provided
+ // entry matches at least one of them.
+ if (! includeFilters.isEmpty()) {
+ boolean matchFound = false;
+ for (SearchFilter f : includeFilters) {
+ try {
+ if (f.matchesEntry(entry)) {
+ matchFound = true;
+ break;
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // This shouldn't happen, but if it does, then just ignore it.
+ }
+ }
+
+ if (! matchFound) {
+ return true;
+ }
+ }
+
+    // Obtain a lock on the cache. If this fails, then don't do anything.
+    // Note that the lock must only be released if it was actually acquired.
+    try {
+      if (! cacheWriteLock.tryLock(lockTimeout, TimeUnit.MILLISECONDS)) {
+        // We can't rule out the possibility of a conflict, so return false.
+        return false;
+      }
+    } catch (Exception e) {
+      if (debugEnabled()) {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // We can't rule out the possibility of a conflict, so return false.
+      return false;
+    }
+
+    try {
+      // See if the entry already exists in the cache. If it does, then we
+      // will fail and not actually store the entry.
+      if (dnMap.containsKey(entry.getDN())) {
+        return false;
+      }
+      return putEntryToDB(entry, backend, entryID);
+    } catch (Exception e) {
+      if (debugEnabled()) {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // We can't rule out the possibility of a conflict, so return false.
+      return false;
+    } finally {
+      cacheWriteLock.unlock();
+    }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void removeEntry(DN entryDN) {
+
+ cacheWriteLock.lock();
+
+ try {
+ Long entryID = dnMap.get(entryDN);
+ if (entryID == null) {
+ return;
+ }
+      for (Map<Long,DN> map : backendMap.values()) {
+        DN mappedDN = map.get(entryID);
+        if ((mappedDN != null) && mappedDN.equals(entryDN)) {
+          map.remove(entryID);
+        }
+      }
+
+ dnMap.remove(entryDN);
+ entryCacheDB.delete(null,
+ new DatabaseEntry(entryDN.toNormalizedString().getBytes("UTF-8")));
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ } finally {
+ cacheWriteLock.unlock();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void clear() {
+    cacheWriteLock.lock();
+
+    try {
+      dnMap.clear();
+      backendMap.clear();
+
+ if ((entryCacheDB != null) && (entryCacheEnv != null) &&
+ (entryCacheClassDB != null) && (entryCacheDBConfig != null)) {
+ entryCacheDBConfig = entryCacheDB.getConfig();
+ entryCacheDB.close();
+ entryCacheClassDB.close();
+ entryCacheEnv.truncateDatabase(null, ENTRYCACHEDBNAME, false);
+ entryCacheEnv.truncateDatabase(null, INDEXCLASSDBNAME, false);
+ entryCacheEnv.cleanLog();
+ entryCacheDB = entryCacheEnv.openDatabase(null,
+ ENTRYCACHEDBNAME, entryCacheDBConfig);
+ entryCacheClassDB = entryCacheEnv.openDatabase(null,
+ INDEXCLASSDBNAME, entryCacheDBConfig);
+ // Instantiate the class catalog
+ classCatalog = new StoredClassCatalog(entryCacheClassDB);
+ entryCacheDataBinding =
+ new SerialBinding(classCatalog, FileSystemEntryCacheIndex.class);
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ } finally {
+ cacheWriteLock.unlock();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void clearBackend(Backend backend) {
+
+    cacheWriteLock.lock();
+
+    try {
+      // If the cache holds nothing for this backend, there is nothing to do.
+      Map<Long,DN> backendEntriesMap = backendMap.get(backend);
+      if (backendEntriesMap == null) {
+        return;
+      }
+
+      int entriesExamined = 0;
+      Set<Long> entriesSet = backendEntriesMap.keySet();
+ Iterator<Long> backendEntriesIterator = entriesSet.iterator();
+ while (backendEntriesIterator.hasNext()) {
+ long entryID = backendEntriesIterator.next();
+ DN entryDN = backendEntriesMap.get(entryID);
+ entryCacheDB.delete(null,
+ new DatabaseEntry(entryDN.toNormalizedString().getBytes("UTF-8")));
+ dnMap.remove(entryDN);
+
+ // This can take a while, so we'll periodically release and re-acquire
+ // the lock in case anyone else is waiting on it so this doesn't become
+ // a stop-the-world event as far as the cache is concerned.
+ entriesExamined++;
+ if ((entriesExamined % 1000) == 0) {
+ cacheWriteLock.unlock();
+          Thread.yield();
+ cacheWriteLock.lock();
+ }
+ }
+
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ } finally {
+ backendMap.remove(backend);
+ cacheWriteLock.unlock();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void clearSubtree(DN baseDN) {
+ // Determine which backend should be used for the provided base DN. If
+ // there is none, then we don't need to do anything.
+ Backend backend = DirectoryServer.getBackend(baseDN);
+ if (backend == null)
+ {
+ return;
+ }
+
+ // Acquire a lock on the cache. We should not return until the cache has
+ // been cleared, so we will block until we can obtain the lock.
+ cacheWriteLock.lock();
+
+ // At this point, it is absolutely critical that we always release the lock
+ // before leaving this method, so do so in a finally block.
+ try
+ {
+ clearSubtree(baseDN, backend);
+ }
+ catch (Exception e)
+ {
+ if (debugEnabled())
+ {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ // This shouldn't happen, but there's not much that we can do if it does.
+ }
+ finally
+ {
+ cacheWriteLock.unlock();
+ }
+ }
+
+ /**
+ * Clears all entries at or below the specified base DN that are associated
+ * with the given backend. The caller must already hold the cache lock.
+ *
+ * @param baseDN The base DN below which all entries should be flushed.
+ * @param backend The backend for which to remove the appropriate entries.
+ */
+ private void clearSubtree(DN baseDN, Backend backend) {
+ // See if there are any entries for the provided backend in the cache. If
+ // not, then return.
+ Map<Long,DN> map = backendMap.get(backend);
+ if (map == null)
+ {
+ // No entries were in the cache for this backend, so we can return without
+ // doing anything.
+ return;
+ }
+
+ // Since the provided base DN could hold a subset of the information in the
+ // specified backend, we will have to do this by iterating through all the
+ // entries for that backend. Since this could take a while, we'll
+ // periodically release and re-acquire the lock in case anyone else is
+ // waiting on it so this doesn't become a stop-the-world event as far as the
+ // cache is concerned.
+ int entriesExamined = 0;
+ Iterator<DN> iterator = map.values().iterator();
+ while (iterator.hasNext())
+ {
+ DN entryDN = iterator.next();
+ if (entryDN.isDescendantOf(baseDN))
+ {
+ iterator.remove();
+ dnMap.remove(entryDN);
+ try {
+ entryCacheDB.delete(null,
+ new DatabaseEntry(
+ entryDN.toNormalizedString().getBytes("UTF-8")));
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ }
+ }
+
+ entriesExamined++;
+ if ((entriesExamined % 1000) == 0)
+ {
+ cacheWriteLock.unlock();
+        Thread.yield();
+ cacheWriteLock.lock();
+ }
+ }
+
+ // See if the backend has any subordinate backends. If so, then process
+ // them recursively.
+ for (Backend subBackend : backend.getSubordinateBackends())
+ {
+ boolean isAppropriate = false;
+ for (DN subBase : subBackend.getBaseDNs())
+ {
+ if (subBase.isDescendantOf(baseDN))
+ {
+ isAppropriate = true;
+ break;
+ }
+ }
+
+ if (isAppropriate)
+ {
+ clearSubtree(baseDN, subBackend);
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void handleLowMemory() {
+ // This is about all we can do.
+ if (entryCacheEnv != null) {
+ try {
+ // Free some JVM memory.
+ entryCacheEnv.evictMemory();
+ // Free some main memory/space.
+ entryCacheEnv.cleanLog();
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public boolean isConfigurationChangeAcceptable(
+ FileSystemEntryCacheCfg configuration,
+ List<String> unacceptableReasons
+ )
+ {
+    // Validate the new configuration. If it is acceptable, then report it as
+    // such; no changes are applied during this phase.
+ boolean applyChanges = false;
+ EntryCacheCommon.ConfigErrorHandler errorHandler =
+ EntryCacheCommon.getConfigErrorHandler (
+ EntryCacheCommon.ConfigPhase.PHASE_ACCEPTABLE,
+ unacceptableReasons,
+ null
+ );
+ processEntryCacheConfig (configuration, applyChanges, errorHandler);
+
+ return errorHandler.getIsAcceptable();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public ConfigChangeResult applyConfigurationChange(
+ FileSystemEntryCacheCfg configuration
+ )
+ {
+    // Validate the new configuration and, if it is acceptable, activate it.
+    boolean applyChanges = true;
+ ArrayList<String> errorMessages = new ArrayList<String>();
+ EntryCacheCommon.ConfigErrorHandler errorHandler =
+ EntryCacheCommon.getConfigErrorHandler (
+ EntryCacheCommon.ConfigPhase.PHASE_APPLY, null, errorMessages
+ );
+ processEntryCacheConfig (configuration, applyChanges, errorHandler);
+
+
+ boolean adminActionRequired = false;
+ ConfigChangeResult changeResult = new ConfigChangeResult(
+ errorHandler.getResultCode(),
+ adminActionRequired,
+ errorHandler.getErrorMessages()
+ );
+
+ return changeResult;
+ }
+
+ /**
+ * Makes a best-effort attempt to apply the configuration contained in the
+ * provided entry. Information about the result of this processing should be
+ * added to the provided message list. Information should always be added to
+ * this list if a configuration change could not be applied. If detailed
+ * results are requested, then information about the changes applied
+ * successfully (and optionally about parameters that were not changed) should
+ * also be included.
+ *
+ * @param configuration The entry containing the new configuration to
+ * apply for this component.
+ * @param detailedResults Indicates whether detailed information about the
+ * processing should be added to the list.
+ *
+ * @return Information about the result of the configuration update.
+ */
+ public ConfigChangeResult applyNewConfiguration(
+ FileSystemEntryCacheCfg configuration,
+ boolean detailedResults
+ )
+ {
+ // Store the current value to detect changes.
+ long prevLockTimeout = lockTimeout;
+ long prevMaxEntries = maxEntries.longValue();
+ Set<SearchFilter> prevIncludeFilters = includeFilters;
+ Set<SearchFilter> prevExcludeFilters = excludeFilters;
+ long prevMaxAllowedMemory = maxAllowedMemory;
+ int prevJECachePercent = jeCachePercent;
+ long prevJECacheSize = jeCacheSize;
+ boolean prevPersistentCache = persistentCache;
+
+ // Activate the new configuration.
+ ConfigChangeResult changeResult = applyConfigurationChange(configuration);
+
+ // Add detailed messages if needed.
+ ResultCode resultCode = changeResult.getResultCode();
+ boolean configIsAcceptable = (resultCode == ResultCode.SUCCESS);
+ if (detailedResults && configIsAcceptable)
+ {
+ if (maxEntries.longValue() != prevMaxEntries)
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_MAX_ENTRIES, maxEntries));
+ }
+
+ if (lockTimeout != prevLockTimeout)
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_LOCK_TIMEOUT, lockTimeout));
+ }
+
+ if (!includeFilters.equals(prevIncludeFilters))
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_INCLUDE_FILTERS));
+ }
+
+ if (!excludeFilters.equals(prevExcludeFilters))
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_EXCLUDE_FILTERS));
+ }
+
+ if (maxAllowedMemory != prevMaxAllowedMemory)
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_MAX_MEMORY_SIZE,
+ maxAllowedMemory));
+ }
+
+ if (jeCachePercent != prevJECachePercent)
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_JE_MEMORY_PCT, jeCachePercent));
+ }
+
+ if (jeCacheSize != prevJECacheSize)
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_JE_MEMORY_SIZE, jeCacheSize));
+ }
+
+ if (persistentCache != prevPersistentCache)
+ {
+ changeResult.addMessage(
+ getMessage (MSGID_FSCACHE_UPDATED_IS_PERSISTENT, persistentCache));
+ }
+ }
+
+ return changeResult;
+ }
+
+ /**
+   * Parses the provided configuration and configures the entry cache.
+   *
+   * @param configuration The new configuration containing the changes.
+   * @param applyChanges If true then take into account the new configuration.
+   * @param errorHandler A handler used to report errors.
+   *
+   * @return <CODE>true</CODE> if the provided configuration is acceptable,
+   *         or <CODE>false</CODE> if it is not.
+ */
+ public boolean processEntryCacheConfig(
+ FileSystemEntryCacheCfg configuration,
+ boolean applyChanges,
+ EntryCacheCommon.ConfigErrorHandler errorHandler
+ )
+ {
+ // Local variables to read configuration.
+ DN newConfigEntryDN;
+ long newLockTimeout;
+ long newMaxEntries;
+ long newMaxAllowedMemory;
+ HashSet<SearchFilter> newIncludeFilters = null;
+ HashSet<SearchFilter> newExcludeFilters = null;
+ int newJECachePercent;
+ long newJECacheSize;
+ boolean newPersistentCache;
+ String newCacheType = DEFAULT_FSCACHE_TYPE;
+ String newCacheHome = DEFAULT_FSCACHE_HOME;
+
+ // Read configuration.
+ newConfigEntryDN = configuration.dn();
+ newLockTimeout = configuration.getLockTimeout();
+ newMaxEntries = configuration.getMaxEntries();
+
+ // Maximum memory/space this cache can utilize.
+ newMaxAllowedMemory = configuration.getMaxMemorySize();
+
+ // Determine JE cache percent.
+ newJECachePercent = configuration.getDatabaseCachePercent();
+
+ // Determine JE cache size.
+ newJECacheSize = configuration.getDatabaseCacheSize();
+
+ // Check if this cache is persistent.
+ newPersistentCache = configuration.isPersistentCache();
+
+ switch (errorHandler.getConfigPhase())
+ {
+ case PHASE_INIT:
+ // Determine the cache type.
+ newCacheType = configuration.getCacheType().toString();
+
+ // Determine the cache home.
+ newCacheHome = configuration.getCacheDirectory();
+
+ newIncludeFilters = EntryCacheCommon.getFilters(
+ configuration.getIncludeFilter(),
+ MSGID_FIFOCACHE_INVALID_INCLUDE_FILTER,
+ MSGID_FIFOCACHE_CANNOT_DECODE_ANY_INCLUDE_FILTERS,
+ errorHandler,
+ configEntryDN
+ );
+ newExcludeFilters = EntryCacheCommon.getFilters (
+ configuration.getExcludeFilter(),
+ MSGID_FIFOCACHE_CANNOT_DECODE_EXCLUDE_FILTER,
+ MSGID_FIFOCACHE_CANNOT_DECODE_ANY_EXCLUDE_FILTERS,
+ errorHandler,
+ configEntryDN
+ );
+ break;
+ case PHASE_ACCEPTABLE: // acceptable and apply are using the same
+ case PHASE_APPLY: // error ID codes
+ newIncludeFilters = EntryCacheCommon.getFilters (
+ configuration.getIncludeFilter(),
+ MSGID_FIFOCACHE_INVALID_INCLUDE_FILTER,
+ 0,
+ errorHandler,
+ configEntryDN
+ );
+ newExcludeFilters = EntryCacheCommon.getFilters (
+ configuration.getExcludeFilter(),
+ MSGID_FIFOCACHE_INVALID_EXCLUDE_FILTER,
+ 0,
+ errorHandler,
+ configEntryDN
+ );
+ break;
+ }
+
+ if (applyChanges && errorHandler.getIsAcceptable())
+ {
+ switch (errorHandler.getConfigPhase()) {
+ case PHASE_INIT:
+ cacheType = newCacheType;
+ cacheHome = newCacheHome;
+ jeCachePercent = newJECachePercent;
+ jeCacheSize = newJECacheSize;
+ break;
+ case PHASE_APPLY:
+ jeCachePercent = newJECachePercent;
+ try {
+ EnvironmentConfig envConfig = entryCacheEnv.getConfig();
+ envConfig.setCachePercent(jeCachePercent);
+ entryCacheEnv.setMutableConfig(envConfig);
+ entryCacheEnv.evictMemory();
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ errorHandler.reportError(
+ ErrorLogCategory.CONFIGURATION,
+ ErrorLogSeverity.SEVERE_WARNING,
+ MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_PCT,
+ String.valueOf(configEntryDN),
+ stackTraceToSingleLineString(e),
+ null,
+ false,
+ ResultCode.OPERATIONS_ERROR
+ );
+ }
+ jeCacheSize = newJECacheSize;
+ try {
+ EnvironmentConfig envConfig = entryCacheEnv.getConfig();
+ envConfig.setCacheSize(jeCacheSize);
+ entryCacheEnv.setMutableConfig(envConfig);
+ entryCacheEnv.evictMemory();
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ errorHandler.reportError(
+ ErrorLogCategory.CONFIGURATION,
+ ErrorLogSeverity.SEVERE_WARNING,
+ MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_SIZE,
+ String.valueOf(configEntryDN),
+ stackTraceToSingleLineString(e),
+ null,
+ false,
+ ResultCode.OPERATIONS_ERROR
+ );
+ }
+ break;
+ }
+
+ configEntryDN = newConfigEntryDN;
+ lockTimeout = newLockTimeout;
+ maxEntries = new AtomicLong(newMaxEntries);
+ maxAllowedMemory = newMaxAllowedMemory;
+ includeFilters = newIncludeFilters;
+ excludeFilters = newExcludeFilters;
+ persistentCache = newPersistentCache;
+ }
+
+ return errorHandler.getIsAcceptable();
+ }
+
+ /**
+ * Retrieves and decodes the entry with the specified DN from JE backend db.
+ *
+ * @param entryDN The DN of the entry to retrieve.
+ *
+ * @return The requested entry if it is present in the cache, or
+ * <CODE>null</CODE> if it is not present.
+ */
+ private Entry getEntryFromDB(DN entryDN)
+ {
+ DatabaseEntry cacheEntryKey = new DatabaseEntry();
+ DatabaseEntry primaryData = new DatabaseEntry();
+
+ try {
+ // Get the primary key and data.
+ cacheEntryKey.setData(entryDN.toNormalizedString().getBytes("UTF-8"));
+ if (entryCacheDB.get(null, cacheEntryKey,
+ primaryData,
+ LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+
+ return decodeEntry(entryDN, primaryData.getData());
+ } else {
+ throw new Exception();
+ }
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // Log an error message.
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_CANNOT_RETRIEVE_ENTRY,
+ stackTraceToSingleLineString(e));
+ }
+ return null;
+ }
+
+ /**
+ * Encodes and stores the entry in the JE backend db.
+ *
+ * @param entry The entry to store in the cache.
+ * @param backend The backend with which the entry is associated.
+ * @param entryID The entry ID within the provided backend that uniquely
+ * identifies the specified entry.
+ *
+ * @return <CODE>false</CODE> if some problem prevented the method from
+ * completing successfully, or <CODE>true</CODE> if the entry
+ * was either stored or the cache determined that this entry
+ * should never be cached for some reason.
+ */
+ private boolean putEntryToDB(Entry entry, Backend backend, long entryID) {
+ try {
+ // See if the current fs space usage is within acceptable constraints. If
+ // so, then add the entry to the cache (or replace it if it is already
+ // present). If not, then remove an existing entry and don't add the new
+ // entry.
+ long usedMemory = 0;
+
+ // Zero means unlimited here.
+ if (maxAllowedMemory != 0) {
+ // TODO: This should be done using JE public API
+ // EnvironmentStats.getTotalLogSize() when JE 3.2.28 is available.
+ EnvironmentImpl envImpl =
+ DbInternal.envGetEnvironmentImpl(entryCacheEnv);
+ UtilizationProfile utilProfile = envImpl.getUtilizationProfile();
+ SortedMap map = utilProfile.getFileSummaryMap(false);
+
+      // This calculation is not exactly precise, since the last JE log file
+      // will always be smaller than JELOGFILEMAX. However, in the interest
+      // of performance, and given that the JE environment is always a moving
+      // target, we allow for that margin of error here.
+ usedMemory = map.size() * JELOGFILEMAX.longValue();
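+      // Worked example (illustrative numbers only): with 12 JE log files on
+      // disk and a JELOGFILEMAX of 10485760 bytes (10MB), usedMemory is
+      // estimated as 12 * 10485760 = 125829120 bytes, i.e. 120MB.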
+
+ // TODO: Check and log a warning if usedMemory hits default or
+ // configurable watermark, see Issue 1735.
+
+ if (usedMemory > maxAllowedMemory) {
+ long savedMaxEntries = maxEntries.longValue();
+          // Cap maxEntries artificially but don't let it go negative under
+          // any circumstances.
+ maxEntries.set((dnMap.isEmpty() ? 0 : dnMap.size() - 1));
+ // Add the entry to the map to trigger remove of the eldest entry.
+ // @see LinkedHashMapRotator.removeEldestEntry() for more details.
+ dnMap.put(entry.getDN(), entryID);
+ // Restore the map and maxEntries.
+ dnMap.remove(entry.getDN());
+ maxEntries.set(savedMaxEntries);
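+          // To illustrate the trick above (the entry count is hypothetical):
+          // with 1000 entries cached, maxEntries is briefly capped at 999,
+          // so the put() above pushes the map over its limit and
+          // LinkedHashMapRotator.removeEldestEntry() evicts the eldest
+          // entry; the probe entry is then removed and the original limit
+          // restored, shrinking the cache by one entry.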
+          // We'll always return true in this case, even though we didn't
+          // actually add the entry due to memory constraints.
+ return true;
+ }
+ }
+
+ // Create key.
+ DatabaseEntry cacheEntryKey = new DatabaseEntry();
+ cacheEntryKey.setData(
+ entry.getDN().toNormalizedString().getBytes("UTF-8"));
+
+ // Create data and put this cache entry into the database.
+ if (entryCacheDB.put(null, cacheEntryKey,
+ new DatabaseEntry(encodeEntry(entry))) == OperationStatus.SUCCESS) {
+
+ // Add the entry to the cache maps.
+ dnMap.put(entry.getDN(), entryID);
+ Map<Long,DN> map = backendMap.get(backend);
+ if (map == null) {
+ map = new LinkedHashMap<Long,DN>();
+ map.put(entryID, entry.getDN());
+ backendMap.put(backend, map);
+ } else {
+ map.put(entryID, entry.getDN());
+ }
+ }
+
+ // We'll always return true in this case, even if we didn't actually add
+ // the entry due to memory constraints.
+ return true;
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+
+ // Log an error message.
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.SEVERE_ERROR,
+ MSGID_FSCACHE_CANNOT_STORE_ENTRY,
+ stackTraceToSingleLineString(e));
+
+ return false;
+ }
+ }
+
+ /**
+   * TODO: A custom encoding is used here due to performance and space
+   * considerations. This should eventually be replaced by the
+   * Entry.encode() method; see Issue 1675.
+ */
+ private static byte[] encodeEntry(Entry entry)
+ {
+ // Encode cache entry.
+ int totalBytes = 0;
+
+ // The object classes will be encoded as one-to-five byte length
+ // followed by a zero-delimited UTF-8 byte representation of the
+ // names (e.g., top\0person\0organizationalPerson\0inetOrgPerson).
+ int i=0;
+ int totalOCBytes = entry.getObjectClasses().size() - 1;
+ byte[][] ocBytes = new byte[entry.getObjectClasses().size()][];
+ for (String ocName : entry.getObjectClasses().values()) {
+ ocBytes[i] = getBytes(ocName);
+ totalOCBytes += ocBytes[i++].length;
+ }
+ byte[] ocLength = ASN1Element.encodeLength(totalOCBytes);
+ totalBytes += totalOCBytes + ocLength.length;
+
+
+ // The user attributes will be encoded as a one-to-five byte
+ // number of attributes followed by a sequence of:
+ // - A UTF-8 byte representation of the attribute name.
+ // - A zero delimiter
+ // - A one-to-five byte number of values for the attribute
+ // - A sequence of:
+ // - A one-to-five byte length for the value
+ // - A UTF-8 byte representation for the value
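+    // Illustrative layout (the attribute and value are assumptions chosen
+    // for this sketch only): an attribute "cn" with the single value "John"
+    // would be packed as
+    //   'c','n',0x00          zero-terminated attribute name
+    //   0x01                  number of values
+    //   0x04,'J','o','h','n'  value length followed by the value bytes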
+ i=0;
+ int numUserAttributes = 0;
+ int totalUserAttrBytes = 0;
+ LinkedList<byte[]> userAttrBytes = new LinkedList<byte[]>();
+ for (List<Attribute> attrList :
+ entry.getUserAttributes().values()) {
+ for (Attribute a : attrList) {
+ if (a.isVirtual() || (! a.hasValue())) {
+ continue;
+ }
+
+ numUserAttributes++;
+
+ byte[] nameBytes = getBytes(a.getNameWithOptions());
+
+ int numValues = 0;
+ int totalValueBytes = 0;
+ LinkedList<byte[]> valueBytes = new LinkedList<byte[]>();
+ for (AttributeValue v : a.getValues()) {
+ numValues++;
+ byte[] vBytes = v.getValueBytes();
+ byte[] vLength = ASN1Element.encodeLength(vBytes.length);
+ valueBytes.add(vLength);
+ valueBytes.add(vBytes);
+ totalValueBytes += vLength.length + vBytes.length;
+ }
+ byte[] numValuesBytes = ASN1Element.encodeLength(numValues);
+
+ byte[] attrBytes = new byte[nameBytes.length +
+ numValuesBytes.length +
+ totalValueBytes + 1];
+ System.arraycopy(nameBytes, 0, attrBytes, 0,
+ nameBytes.length);
+
+ int pos = nameBytes.length+1;
+ System.arraycopy(numValuesBytes, 0, attrBytes, pos,
+ numValuesBytes.length);
+ pos += numValuesBytes.length;
+ for (byte[] b : valueBytes) {
+ System.arraycopy(b, 0, attrBytes, pos, b.length);
+ pos += b.length;
+ }
+
+ userAttrBytes.add(attrBytes);
+ totalUserAttrBytes += attrBytes.length;
+ }
+ }
+ byte[] userAttrCount =
+ ASN1OctetString.encodeLength(numUserAttributes);
+ totalBytes += totalUserAttrBytes + userAttrCount.length;
+
+ // The operational attributes will be encoded in the same way as
+ // the user attributes.
+ i=0;
+ int numOperationalAttributes = 0;
+ int totalOperationalAttrBytes = 0;
+ LinkedList<byte[]> operationalAttrBytes =
+ new LinkedList<byte[]>();
+ for (List<Attribute> attrList :
+ entry.getOperationalAttributes().values()) {
+ for (Attribute a : attrList) {
+ if (a.isVirtual() || (! a.hasValue())) {
+ continue;
+ }
+
+ numOperationalAttributes++;
+
+ byte[] nameBytes = getBytes(a.getNameWithOptions());
+
+ int numValues = 0;
+ int totalValueBytes = 0;
+ LinkedList<byte[]> valueBytes = new LinkedList<byte[]>();
+ for (AttributeValue v : a.getValues()) {
+ numValues++;
+ byte[] vBytes = v.getValueBytes();
+ byte[] vLength = ASN1Element.encodeLength(vBytes.length);
+ valueBytes.add(vLength);
+ valueBytes.add(vBytes);
+ totalValueBytes += vLength.length + vBytes.length;
+ }
+ byte[] numValuesBytes = ASN1Element.encodeLength(numValues);
+
+ byte[] attrBytes = new byte[nameBytes.length +
+ numValuesBytes.length +
+ totalValueBytes + 1];
+ System.arraycopy(nameBytes, 0, attrBytes, 0,
+ nameBytes.length);
+
+ int pos = nameBytes.length+1;
+ System.arraycopy(numValuesBytes, 0, attrBytes, pos,
+ numValuesBytes.length);
+ pos += numValuesBytes.length;
+ for (byte[] b : valueBytes) {
+ System.arraycopy(b, 0, attrBytes, pos, b.length);
+ pos += b.length;
+ }
+
+ operationalAttrBytes.add(attrBytes);
+ totalOperationalAttrBytes += attrBytes.length;
+ }
+ }
+ byte[] operationalAttrCount =
+ ASN1OctetString.encodeLength(numOperationalAttributes);
+ totalBytes += totalOperationalAttrBytes +
+ operationalAttrCount.length;
+
+
+ // Now we've got all the data that we need. Create a big byte
+ // array to hold it all and pack it in.
+ byte[] entryBytes = new byte[totalBytes];
+
+ int pos = 0;
+
+ // Add the object classes length and values.
+ System.arraycopy(ocLength, 0, entryBytes, pos, ocLength.length);
+ pos += ocLength.length;
+ for (byte[] b : ocBytes) {
+ System.arraycopy(b, 0, entryBytes, pos, b.length);
+ pos += b.length + 1;
+ }
+
+    // We need to back up one because there's no zero-terminator
+    // after the last object class name.
+ pos--;
+
+ // Next, add the user attribute count and the user attribute
+ // data.
+ System.arraycopy(userAttrCount, 0, entryBytes, pos,
+ userAttrCount.length);
+ pos += userAttrCount.length;
+ for (byte[] b : userAttrBytes) {
+ System.arraycopy(b, 0, entryBytes, pos, b.length);
+ pos += b.length;
+ }
+
+ // Finally, add the operational attribute count and the
+ // operational attribute data.
+ System.arraycopy(operationalAttrCount, 0, entryBytes, pos,
+ operationalAttrCount.length);
+ pos += operationalAttrCount.length;
+ for (byte[] b : operationalAttrBytes) {
+ System.arraycopy(b, 0, entryBytes, pos, b.length);
+ pos += b.length;
+ }
+
+ return entryBytes;
+ }
+
+ /**
+   * TODO: A custom decoding is used here due to performance and space
+   * considerations. This should eventually be replaced by the
+   * Entry.decode() method; see Issue 1675.
+ */
+ private static Entry decodeEntry(DN entryDN, byte[] entryBytes)
+ throws UnsupportedEncodingException
+ {
+ // Decode cache entry.
+ int pos = 0;
+ // The length of the object classes. It may be a single
+ // byte or multiple bytes.
+ int ocLength = entryBytes[pos] & 0x7F;
+ if (entryBytes[pos++] != ocLength) {
+ int numLengthBytes = ocLength;
+ ocLength = 0;
+ for (int i=0; i < numLengthBytes; i++, pos++) {
+ ocLength = (ocLength << 8) | (entryBytes[pos] & 0xFF);
+ }
+ }
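+    // The length uses a BER-style encoding. For example (illustrative bytes
+    // only): a single byte 0x1A, with the high bit clear, decodes directly
+    // to length 26, while the sequence 0x82 0x01 0x2C, where 0x82 & 0x7F
+    // says two length bytes follow, decodes to (0x01 << 8) | 0x2C = 300.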
+
+ // Next is the encoded set of object classes. It will be a
+ // single string with the object class names separated by zeros.
+ LinkedHashMap<ObjectClass,String> objectClasses =
+ new LinkedHashMap<ObjectClass,String>();
+ int startPos = pos;
+ for (int i=0; i < ocLength; i++,pos++) {
+ if (entryBytes[pos] == 0x00) {
+ String name = new String(entryBytes, startPos, pos-startPos,
+ "UTF-8");
+ String lowerName = toLowerCase(name);
+ ObjectClass oc =
+ DirectoryServer.getObjectClass(lowerName, true);
+ objectClasses.put(oc, name);
+ startPos = pos+1;
+ }
+ }
+ String name = new String(entryBytes, startPos, pos-startPos,
+ "UTF-8");
+ String lowerName = toLowerCase(name);
+ ObjectClass oc =
+ DirectoryServer.getObjectClass(lowerName, true);
+ objectClasses.put(oc, name);
+
+ // Next is the total number of user attributes. It may be a
+ // single byte or multiple bytes.
+ int numUserAttrs = entryBytes[pos] & 0x7F;
+ if (entryBytes[pos++] != numUserAttrs) {
+ int numLengthBytes = numUserAttrs;
+ numUserAttrs = 0;
+ for (int i=0; i < numLengthBytes; i++, pos++) {
+ numUserAttrs = (numUserAttrs << 8) |
+ (entryBytes[pos] & 0xFF);
+ }
+ }
+
+ // Now, we should iterate through the user attributes and decode
+ // each one.
+ LinkedHashMap<AttributeType,List<Attribute>> userAttributes =
+ new LinkedHashMap<AttributeType,List<Attribute>>();
+ for (int i=0; i < numUserAttrs; i++) {
+ // First, we have the zero-terminated attribute name.
+ startPos = pos;
+ while (entryBytes[pos] != 0x00) {
+ pos++;
+ }
+ name = new String(entryBytes, startPos, pos-startPos,
+ "UTF-8");
+ LinkedHashSet<String> options;
+ int semicolonPos = name.indexOf(';');
+ if (semicolonPos > 0) {
+ String baseName = name.substring(0, semicolonPos);
+ lowerName = toLowerCase(baseName);
+ options = new LinkedHashSet<String>();
+
+ int nextPos = name.indexOf(';', semicolonPos+1);
+ while (nextPos > 0) {
+ String option = name.substring(semicolonPos+1, nextPos);
+ if (option.length() > 0) {
+ options.add(option);
+ }
+
+ semicolonPos = nextPos;
+ nextPos = name.indexOf(';', semicolonPos+1);
+ }
+
+ String option = name.substring(semicolonPos+1);
+ if (option.length() > 0) {
+ options.add(option);
+ }
+
+ name = baseName;
+ } else {
+ lowerName = toLowerCase(name);
+ options = new LinkedHashSet<String>(0);
+ }
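+      // For example (illustrative attribute name only): a stored name of
+      // "cn;lang-fr;lang-de" yields the base name "cn" with the options
+      // {"lang-fr", "lang-de"}.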
+ AttributeType attributeType =
+ DirectoryServer.getAttributeType(lowerName, true);
+
+ // Next, we have the number of values.
+ int numValues = entryBytes[++pos] & 0x7F;
+ if (entryBytes[pos++] != numValues) {
+ int numLengthBytes = numValues;
+ numValues = 0;
+ for (int j=0; j < numLengthBytes; j++, pos++) {
+ numValues = (numValues << 8) | (entryBytes[pos] & 0xFF);
+ }
+ }
+
+ // Next, we have the sequence of length-value pairs.
+ LinkedHashSet<AttributeValue> values =
+ new LinkedHashSet<AttributeValue>(numValues);
+ for (int j=0; j < numValues; j++) {
+ int valueLength = entryBytes[pos] & 0x7F;
+ if (entryBytes[pos++] != valueLength) {
+ int numLengthBytes = valueLength;
+ valueLength = 0;
+ for (int k=0; k < numLengthBytes; k++, pos++) {
+ valueLength = (valueLength << 8) |
+ (entryBytes[pos] & 0xFF);
+ }
+ }
+
+ byte[] valueBytes = new byte[valueLength];
+ System.arraycopy(entryBytes, pos, valueBytes, 0,
+ valueLength);
+ values.add(new AttributeValue(attributeType,
+ new ASN1OctetString(valueBytes)));
+ pos += valueLength;
+ }
+
+ // Create the attribute and add it to the set of user
+ // attributes.
+ Attribute a = new Attribute(attributeType, name, options,
+ values);
+ List<Attribute> attrList = userAttributes.get(attributeType);
+ if (attrList == null) {
+ attrList = new ArrayList<Attribute>(1);
+ attrList.add(a);
+ userAttributes.put(attributeType, attrList);
+ } else {
+ attrList.add(a);
+ }
+ }
+
+ // Next is the total number of operational attributes. It may
+ // be a single byte or multiple bytes.
+ int numOperationalAttrs = entryBytes[pos] & 0x7F;
+ if (entryBytes[pos++] != numOperationalAttrs) {
+ int numLengthBytes = numOperationalAttrs;
+ numOperationalAttrs = 0;
+ for (int i=0; i < numLengthBytes; i++, pos++) {
+ numOperationalAttrs =
+ (numOperationalAttrs << 8) | (entryBytes[pos] & 0xFF);
+ }
+ }
+
+ // Now, we should iterate through the operational attributes and
+ // decode each one.
+ LinkedHashMap<AttributeType,List<Attribute>>
+ operationalAttributes =
+ new LinkedHashMap<AttributeType,List<Attribute>>();
+ for (int i=0; i < numOperationalAttrs; i++) {
+ // First, we have the zero-terminated attribute name.
+ startPos = pos;
+ while (entryBytes[pos] != 0x00) {
+ pos++;
+ }
+ name = new String(entryBytes, startPos, pos-startPos,
+ "UTF-8");
+ LinkedHashSet<String> options;
+ int semicolonPos = name.indexOf(';');
+ if (semicolonPos > 0) {
+ String baseName = name.substring(0, semicolonPos);
+ lowerName = toLowerCase(baseName);
+ options = new LinkedHashSet<String>();
+
+ int nextPos = name.indexOf(';', semicolonPos+1);
+ while (nextPos > 0) {
+ String option = name.substring(semicolonPos+1, nextPos);
+ if (option.length() > 0) {
+ options.add(option);
+ }
+
+ semicolonPos = nextPos;
+ nextPos = name.indexOf(';', semicolonPos+1);
+ }
+
+ String option = name.substring(semicolonPos+1);
+ if (option.length() > 0) {
+ options.add(option);
+ }
+
+ name = baseName;
+ } else {
+ lowerName = toLowerCase(name);
+ options = new LinkedHashSet<String>(0);
+ }
+ AttributeType attributeType =
+ DirectoryServer.getAttributeType(lowerName, true);
+
+ // Next, we have the number of values.
+ int numValues = entryBytes[++pos] & 0x7F;
+ if (entryBytes[pos++] != numValues) {
+ int numLengthBytes = numValues;
+ numValues = 0;
+ for (int j=0; j < numLengthBytes; j++, pos++) {
+ numValues = (numValues << 8) | (entryBytes[pos] & 0xFF);
+ }
+ }
+
+ // Next, we have the sequence of length-value pairs.
+ LinkedHashSet<AttributeValue> values =
+ new LinkedHashSet<AttributeValue>(numValues);
+ for (int j=0; j < numValues; j++) {
+ int valueLength = entryBytes[pos] & 0x7F;
+ if (entryBytes[pos++] != valueLength) {
+ int numLengthBytes = valueLength;
+ valueLength = 0;
+ for (int k=0; k < numLengthBytes; k++, pos++) {
+ valueLength = (valueLength << 8) |
+ (entryBytes[pos] & 0xFF);
+ }
+ }
+
+ byte[] valueBytes = new byte[valueLength];
+ System.arraycopy(entryBytes, pos, valueBytes, 0,
+ valueLength);
+ values.add(new AttributeValue(attributeType,
+ new ASN1OctetString(valueBytes)));
+ pos += valueLength;
+ }
+
+ // Create the attribute and add it to the set of operational
+ // attributes.
+ Attribute a = new Attribute(attributeType, name, options,
+ values);
+ List<Attribute> attrList =
+ operationalAttributes.get(attributeType);
+ if (attrList == null) {
+ attrList = new ArrayList<Attribute>(1);
+ attrList.add(a);
+ operationalAttributes.put(attributeType, attrList);
+ } else {
+ attrList.add(a);
+ }
+ }
+
+ // We've got everything that we need, so create and return the
+ // entry.
+ return new
+ Entry(entryDN, objectClasses, userAttributes, operationalAttributes);
+ }
+
+ /**
+   * Checks if the cache home exists and, if not, tries to create it
+   * recursively. If either succeeds, adjusts the cache home access
+   * permissions to allow only the process owner or the superuser to access
+   * the JE environment.
+   *
+   * @param cacheHome String representation of the complete file system path.
+   *
+   * @throws Exception If the cache home could not be established.
+ */
+ private void checkAndSetupCacheHome(String cacheHome) throws Exception {
+
+ boolean cacheHasHome = false;
+ File cacheHomeDir = new File(cacheHome);
+ if (cacheHomeDir.exists() &&
+ cacheHomeDir.canRead() &&
+ cacheHomeDir.canWrite()) {
+ cacheHasHome = true;
+ } else {
+ try {
+ cacheHasHome = cacheHomeDir.mkdirs();
+ } catch (SecurityException e) {
+ cacheHasHome = false;
+ }
+ }
+ if ( cacheHasHome ) {
+      // TODO: Investigate if it's feasible to employ SetFileAttributes()
+ // FILE_ATTRIBUTE_TEMPORARY attribute on Windows via native code.
+ if(FilePermission.canSetPermissions()) {
+ try {
+ if(!FilePermission.setPermissions(cacheHomeDir,
+ CACHE_HOME_PERMISSIONS)) {
+ throw new Exception();
+ }
+ } catch(Exception e) {
+        // Log a warning that the permissions were not set.
+ int msgID = MSGID_FSCACHE_SET_PERMISSIONS_FAILED;
+ String message = getMessage(msgID, cacheHome);
+ logError(ErrorLogCategory.EXTENSIONS, ErrorLogSeverity.SEVERE_WARNING,
+ message, msgID);
+ }
+ }
+ } else {
+ throw new Exception();
+ }
+ }
+
+ /**
+   * This inner class exists solely to override the
+   * <CODE>removeEldestEntry()</CODE> method of the LinkedHashMap.
+   *
+   * @see java.util.LinkedHashMap
+ */
+ private class LinkedHashMapRotator<K,V> extends LinkedHashMap<K,V> {
+
+ static final long serialVersionUID = 5271482121415968435L;
+
+ public LinkedHashMapRotator(int initialCapacity,
+ float loadFactor,
+ boolean accessOrder) {
+ super(initialCapacity, loadFactor, accessOrder);
+ }
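+
+    // Illustrative usage (the capacity and load factor here are assumptions
+    // for this sketch, not necessarily the values the cache uses):
+    //
+    //   new LinkedHashMapRotator<DN,Long>(16, 0.75f, false); // FIFO order
+    //   new LinkedHashMapRotator<DN,Long>(16, 0.75f, true);  // LRU order
+    //
+    // With accessOrder false the eldest entry is the one inserted first;
+    // with accessOrder true it is the one accessed least recently.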
+
+ // This method will get called each time we add a new key/value
+ // pair to the map. The eldest entry will be selected by the
+ // underlying LinkedHashMap implementation based on the access
+ // order configured and will follow either FIFO implementation
+ // by default or LRU implementation if configured so explicitly.
+ @Override protected boolean removeEldestEntry(Map.Entry eldest) {
+ // Check if we hit the limit on max entries and if so remove
+ // the eldest entry otherwise do nothing.
+ if (size() > maxEntries.longValue()) {
+ DatabaseEntry cacheEntryKey = new DatabaseEntry();
+ cacheWriteLock.lock();
+ try {
+          // Remove the eldest entry from the supporting maps.
+          cacheEntryKey.setData(
+            ((DN) eldest.getKey()).toNormalizedString().getBytes("UTF-8"));
+          long entryID = ((Long) eldest.getValue()).longValue();
+          for (Map<Long,DN> map : backendMap.values()) {
+            map.remove(entryID);
+          }
+          // Remove the eldest entry from the database.
+ entryCacheDB.delete(null, cacheEntryKey);
+ } catch (Exception e) {
+ if (debugEnabled()) {
+ TRACER.debugCaught(DebugLogLevel.ERROR, e);
+ }
+ } finally {
+ cacheWriteLock.unlock();
+ }
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ /**
+ * This exception should be thrown if an error occurs while
+ * trying to locate and load persistent cache index from
+ * the existing entry cache database.
+ */
+ private class CacheIndexNotFoundException extends Exception {
+ static final long serialVersionUID = 6444756053577853869L;
+ public CacheIndexNotFoundException() {}
+ public CacheIndexNotFoundException(String message) {
+ super(message);
+ }
+ }
+
+ /**
+ * This exception should be thrown if persistent cache index
+ * found in the existing entry cache database is determined
+ * to be empty, inconsistent or damaged.
+ */
+ private class CacheIndexImpairedException extends Exception {
+ static final long serialVersionUID = -369455697709478407L;
+ public CacheIndexImpairedException() {}
+ public CacheIndexImpairedException(String message) {
+ super(message);
+ }
+ }
+
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCacheIndex.java b/opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCacheIndex.java
new file mode 100644
index 0000000..07994e7
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/extensions/FileSystemEntryCacheIndex.java
@@ -0,0 +1,73 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE. If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ * Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Portions Copyright 2007 Sun Microsystems, Inc.
+ */
+
+package org.opends.server.extensions;
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.LinkedHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This class represents serializable entry cache index structures
+ * and supporting data types used for the entry cache persistence.
+ * Structurally it should be an inner class of FileSystemEntryCache;
+ * however, due to serialization constraints, it has been kept separate.
+ */
+class FileSystemEntryCacheIndex implements Serializable {
+ static final long serialVersionUID = 4537634108673038611L;
+
+ /**
+ * Backend to Checksum/id map for offline state.
+ */
+ public Map<String,Long> offlineState;
+
+ /**
+ * The mapping between entry backends/IDs and DNs.
+ */
+ public Map<String,Map<Long,String>> backendMap;
+
+ /**
+ * The mapping between DNs and IDs.
+ */
+ public Map<String,Long> dnMap;
+
+ /**
+ * Default constructor.
+ */
+ public FileSystemEntryCacheIndex() {
+ offlineState =
+ // <Backend,Long>
+ new ConcurrentHashMap<String,Long>();
+ backendMap =
+ // <Backend,Map<Long,DN>>
+ new LinkedHashMap<String,Map<Long,String>>();
+ dnMap =
+ // <DN,Long>
+ new LinkedHashMap<String,Long>();
+ }
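+
+  // Illustrative sketch (an assumption mirroring how FileSystemEntryCache
+  // binds this class, not code copied from it): an index instance can be
+  // written to the JE environment through a SerialBinding, roughly:
+  //
+  //   SerialBinding indexBinding =
+  //       new SerialBinding(classCatalog, FileSystemEntryCacheIndex.class);
+  //   DatabaseEntry indexData = new DatabaseEntry();
+  //   indexBinding.objectToEntry(index, indexData);
+  //   indexDB.put(null, indexKey, indexData);
+  //
+  // The indexDB and indexKey names here are hypothetical.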
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/messages/ExtensionsMessages.java b/opendj-sdk/opends/src/server/org/opends/server/messages/ExtensionsMessages.java
index 35d89b6..bbb588d 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/messages/ExtensionsMessages.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/messages/ExtensionsMessages.java
@@ -5013,6 +5013,202 @@
/**
+ * The message ID for the message that will be used if maximum number of cache
+ * entries has been updated. This takes a single argument, which is the new
+ * maximum number of entries.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_MAX_ENTRIES =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 477;
+
+ /**
+ * The message ID for the message that will be used if the cache lock timeout
+ * has been updated. This takes a single argument, which is the new lock
+ * timeout.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_LOCK_TIMEOUT =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 478;
+
+ /**
+ * The message ID for the message that will be used if the cache include
+ * filter set has been updated. This does not take any arguments.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_INCLUDE_FILTERS =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 479;
+
+ /**
+ * The message ID for the message that will be used if the cache exclude
+ * filter set has been updated. This does not take any arguments.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_EXCLUDE_FILTERS =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 480;
+
+ /**
+ * The message ID for the message that will be used if entry cache memory
+ * size has been updated. This takes one argument, which is the new size
+ * of memory that may be used, in bytes.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_MAX_MEMORY_SIZE =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 481;
+
+ /**
+ * The message ID for the message that will be used if entry cache type
+ * has been changed. This takes one argument, which is the new entry
+ * cache type.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_TYPE =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 482;
+
+
+ /**
+ * The message ID for the message that will be used if entry cache JE
+ * cache memory percent has been changed. This takes one argument,
+ * which is the new percentage of JVM memory that can be utilized.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_JE_MEMORY_PCT =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 483;
+
+ /**
+ * The message ID for the message that will be used if entry cache JE
+ * cache memory size has been changed. This takes one argument,
+ * which is the new maximum size of JVM memory that can be utilized.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_JE_MEMORY_SIZE =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 484;
+
+ /**
+ * The message ID for the message that will be used if entry cache
+ * persistence state has been changed. This takes one argument,
+ * which is the new persistence state.
+ */
+ public static final int MSGID_FSCACHE_UPDATED_IS_PERSISTENT =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 485;
+
+ /**
+ * The message ID for the message that will be used if an error occurs while
+ * trying to load persistent cache for the FS-based entry cache.
+ * This takes one argument which is a string representation of the exception
+ * that was caught.
+ */
+ public static final int MSGID_FSCACHE_CANNOT_LOAD_PERSISTENT_DATA =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 486;
+
+ /**
+ * The message ID for the message that will be used if an error occurs while
+ * trying to store persistence cache for the FS-based entry cache.
+ * This takes one argument which is a string representation of the exception
+ * that was caught.
+ */
+ public static final int MSGID_FSCACHE_CANNOT_STORE_PERSISTENT_DATA =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 487;
+
+ /**
+ * The message ID for the message that will be used if a fatal error occurs
+ * while trying to initialize the FS-based entry cache. This takes one
+ * argument which is a string representation of the exception that was caught.
+ */
+ public static final int MSGID_FSCACHE_CANNOT_INITIALIZE =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_FATAL_ERROR | 488;
+
+ /**
+ * The message ID for the message that will be used if we are unable to put
+ * a new entry to the cache. This takes one argument, which is a string
+ * representation of the exception that was caught.
+ */
+ public static final int MSGID_FSCACHE_CANNOT_STORE_ENTRY =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 489;
+
+ /**
+ * The message ID for the message that will be used if we are unable to get
+ * an existing entry from the cache. This takes one argument, which is a
+ * string representation of the exception that was caught.
+ */
+ public static final int MSGID_FSCACHE_CANNOT_RETRIEVE_ENTRY =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 490;
+
+ /**
+ * The message ID for the message that will be used if we are unable to set
+ * the JE cache size as percentage. This takes one argument, which is a
+ * string representation of the exception that was caught.
+ */
+ public static final int MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_PCT =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 491;
+
+ /**
+ * The message ID for the message that will be used if we are unable to set
+ * the JE cache size in bytes. This takes one argument, which is a string
+ * representation of the exception that was caught.
+ */
+ public static final int MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_SIZE =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 492;
+
+ /**
+   * The message ID for the message that will be used if the entry cache home
+   * either cannot be created, or exists but is already in use by another
+   * instance. This takes four arguments, which are the DN of the
+   * configuration entry,
+ * the cache-home attribute value of the configuration entry, default cache
+ * home value and a string representation of the exception that was caught.
+ */
+ public static final int MSGID_FSCACHE_INVALID_HOME =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 493;
+
+ /**
+ * The message ID for the message that will be used if a fatal error occurs
+   * while trying to set up the cache db environment home. This takes one
+ * argument which is a string representation of the exception that was caught.
+ */
+ public static final int MSGID_FSCACHE_HOMELESS =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_FATAL_ERROR | 494;
+
+ /**
+ * The message ID of an error indicating that the file permissions for the
+ * file system entry cache database directory were not set.
+ */
+ public static final int MSGID_FSCACHE_SET_PERMISSIONS_FAILED =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_WARNING | 495;
+
+ /**
+ * The message ID for the message that will be used if recorded offline state
+ * does not match the current offline state for given backend. This takes one
+ * argument which is the backend ID.
+ */
+ public static final int MSGID_FSCACHE_OFFLINE_STATE_FAIL =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_WARNING | 496;
+
+ /**
+ * The message ID for the message that will be used to report the cache
+ * persistent state restoration progress. This takes two arguments which
+ * are the restored entries count and the total number of entries to be
+ * restored.
+ */
+ public static final int MSGID_FSCACHE_RESTORE_PROGRESS_REPORT =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 497;
+
+ /**
+ * The message ID for the message that will be used to report the cache
+ * persistent state preservation progress. This takes two arguments which
+ * are the preserved entries count and the total number of entries to be
+ * preserved.
+ */
+ public static final int MSGID_FSCACHE_SAVE_PROGRESS_REPORT =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 498;
+
+ /**
+ * The message ID for the message that will be used if an error occurs while
+ * trying to load persistent cache index of the FS-based persistent entry
+ * cache. Takes no arguments.
+ */
+ public static final int MSGID_FSCACHE_INDEX_NOT_FOUND =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_INFORMATIONAL | 499;
+
+ /**
+ * The message ID for the message that will be used if persistent cache index
+ * of the FS-based persistent entry cache is empty or damaged in any way we
+ * can detect. Takes no arguments.
+ */
+ public static final int MSGID_FSCACHE_INDEX_IMPAIRED =
+ CATEGORY_MASK_EXTENSIONS | SEVERITY_MASK_SEVERE_ERROR | 500;
+
+ /**
* Associates a set of generic messages with the message IDs defined in this
* class.
*/
@@ -7205,6 +7401,90 @@
"which is itself a virtual static group. One " +
"virtual static group is not allowed to reference " +
"another as its target group");
+
+
+ registerMessage(MSGID_FSCACHE_UPDATED_MAX_MEMORY_SIZE,
+ "The amount of space that may be used for the entry " +
+ "cache has been updated to %d bytes. If the previous " +
+ "amount has been reduced, it may take some time for " +
+ "entries to be purged so that the current cache space" +
+ "consumption can reflect this new setting.");
+ registerMessage(MSGID_FSCACHE_UPDATED_MAX_ENTRIES,
+ "The number of entries that may be held in the entry " +
+ "cache has been updated to %d. If this value has been " +
+ "reduced, it may take some time for entries to be purged " +
+ "so that the cache can reflect this new setting.");
+ registerMessage(MSGID_FSCACHE_UPDATED_LOCK_TIMEOUT,
+ "The lock timeout that will be used to determine the " +
+ "length of time that the cache should block while " +
+ "attempting to acquire a lock for an entry has been " +
+ "set to %d milliseconds.");
+ registerMessage(MSGID_FSCACHE_UPDATED_INCLUDE_FILTERS,
+ "The set of search filters that will control which " +
+ "entries may be included in the cache has been updated.");
+ registerMessage(MSGID_FSCACHE_UPDATED_EXCLUDE_FILTERS,
+ "The set of search filters that will control which " +
+ "entries should be be excluded from the cache has been " +
+ "updated.");
+ registerMessage(MSGID_FSCACHE_UPDATED_TYPE,
+ "The entry cache type has been changed to %s.");
+ registerMessage(MSGID_FSCACHE_UPDATED_JE_MEMORY_PCT,
+ "The amount of memory that may be used for the entry " +
+ "cache Berkeley DB JE internal cache has been updated " +
+ "to %d percent of the total memory available to the JVM.");
+ registerMessage(MSGID_FSCACHE_UPDATED_JE_MEMORY_SIZE,
+ "The amount of JVM memory that may be used for the entry " +
+ "cache Berkeley DB JE internal cache has been updated " +
+ "to %d bytes.");
+ registerMessage(MSGID_FSCACHE_UPDATED_IS_PERSISTENT,
+ "The persistence state for the entry cache has been " +
+ "changed to %s");
+ registerMessage(MSGID_FSCACHE_CANNOT_LOAD_PERSISTENT_DATA,
+ "An error occurred while trying to load persistent cache." +
+ " Persistent cache will be flushed now.");
+ registerMessage(MSGID_FSCACHE_CANNOT_STORE_PERSISTENT_DATA,
+ "An error occurred while trying to store persistent cache" +
+ ". Persistent cache will be flushed now.");
+ registerMessage(MSGID_FSCACHE_CANNOT_INITIALIZE,
+ "A fatal error occurred while trying to initialize file " +
+ "system entry cache. This cache will be disabled now.");
+ registerMessage(MSGID_FSCACHE_HOMELESS,
+ "A fatal error occurred while trying to setup file " +
+ "system entry cache home. No suitable path can be found " +
+ "to host the cache home. This cache will be disabled now.");
+ registerMessage(MSGID_FSCACHE_SET_PERMISSIONS_FAILED,
+ "Unable to set file permissions for the file system " +
+ "entry cache backend database directory %s.");
+ registerMessage(MSGID_FSCACHE_CANNOT_STORE_ENTRY,
+ "Unable to store new cache entry in the file system " +
+ "entry cache.");
+ registerMessage(MSGID_FSCACHE_CANNOT_RETRIEVE_ENTRY,
+ "Unable to retrieve an existing cache entry from the " +
+ "file system entry cache.");
+ registerMessage(MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_PCT,
+ "Internal error occured while trying to set the entry " +
+ "cache backend internal cache size as percentage. The " +
+ "previous or default value will be used instead.");
+ registerMessage(MSGID_FSCACHE_CANNOT_SET_JE_MEMORY_SIZE,
+ "Internal error occured while trying to set the entry " +
+ "cache backend internal cache size in bytes. The " +
+ "previous or default value will be used instead.");
+ registerMessage(MSGID_FSCACHE_OFFLINE_STATE_FAIL,
+ "%s backend current offline state does not match " +
+ "persistent cache last recorded offline state. All " +
+ "cached data for this backend is now discarded.");
+ registerMessage(MSGID_FSCACHE_RESTORE_PROGRESS_REPORT,
+ "Restored %d cache entries of %d total persistent cache " +
+ "entries found.");
+ registerMessage(MSGID_FSCACHE_SAVE_PROGRESS_REPORT,
+ "Made persistent %d cache entries of %d total cache " +
+ "entries found.");
+ registerMessage(MSGID_FSCACHE_INDEX_NOT_FOUND,
+ "No previous persistent cache state can be found. " +
+ "Starting with an empty cache.");
+ registerMessage(MSGID_FSCACHE_INDEX_IMPAIRED,
+ "The persistent cache index is inconsistent or damaged. " +
+ "Persistent cache will be flushed now.");
}
}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/messages/JebMessages.java b/opendj-sdk/opends/src/server/org/opends/server/messages/JebMessages.java
index 30fd871..1f3061d 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/messages/JebMessages.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/messages/JebMessages.java
@@ -1158,6 +1158,14 @@
CATEGORY_MASK_JEB | SEVERITY_MASK_SEVERE_WARNING | 146;
/**
+ * The message ID to use when we have failed to checksum db environment.
+ * This takes one argument which is the backend ID.
+ */
+ public static final int MSGID_JEB_BACKEND_CHECKSUM_FAIL =
+ CATEGORY_MASK_JEB | SEVERITY_MASK_SEVERE_WARNING | 147;
+
+
+ /**
* Associates a set of generic messages with the message IDs defined in this
* class.
*/
@@ -1466,8 +1474,9 @@
registerMessage(MSGID_JEB_REBUILD_BACKEND_ONLINE,
"Rebuilding system index(es) must be done with the " +
"backend containing the base DN disabled");
-
-
+ registerMessage(MSGID_JEB_BACKEND_CHECKSUM_FAIL,
+ "Failed to checksum the database environment for " +
+ "backend %s");
registerMessage(MSGID_ENTRYIDSORTER_CANNOT_EXAMINE_ENTRY,
"Unable to examine the entry with ID %s for sorting " +
"purposes: %s");
--
Gitblit v1.10.0