From 20fdcbef0d17440c367d2943f9c5799bddfe661f Mon Sep 17 00:00:00 2001
From: Matthew Swift <matthew.swift@forgerock.com>
Date: Tue, 07 Apr 2015 10:45:33 +0000
Subject: [PATCH] OPENDJ-1628 - Simplify Index hierarchy and remove Indexer classes
---
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java | 139 ++
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DefaultIndex.java | 379 +++++++++
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/RootContainer.java | 6
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/State.java | 3
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VerifyJob.java | 21
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseEnvironmentMonitor.java | 4
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java | 77
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java | 635 ---------------
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndex.java | 343 +++++--
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Suffix.java | 5
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VLVIndex.java | 16
/dev/null | 105 --
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2Entry.java | 2
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexFilter.java | 4
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryContainer.java | 217 ++---
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2URI.java | 2
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseContainer.java | 46
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexBuffer.java | 151 +--
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryIDSet.java | 1
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2ID.java | 4
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/NullIndex.java | 184 ++--
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AbstractDatabaseContainer.java | 92 ++
22 files changed, 1157 insertions(+), 1279 deletions(-)
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AbstractDatabaseContainer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AbstractDatabaseContainer.java
new file mode 100644
index 0000000..ff4f67a
--- /dev/null
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AbstractDatabaseContainer.java
@@ -0,0 +1,92 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
+ * or http://forgerock.org/license/CDDLv1.0.html.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at legal-notices/CDDLv1_0.txt.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information:
+ * Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2006-2008 Sun Microsystems, Inc.
+ * Portions Copyright 2011-2015 ForgeRock AS
+ */
+package org.opends.server.backends.pluggable;
+
+import org.opends.server.backends.pluggable.spi.ReadableTransaction;
+import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
+import org.opends.server.backends.pluggable.spi.TreeName;
+import org.opends.server.backends.pluggable.spi.WriteableTransaction;
+
+/**
+ * This class is a wrapper around the tree object and provides the common
+ * operations for opening, deleting, counting and naming the tree.
+ */
+abstract class AbstractDatabaseContainer implements DatabaseContainer
+{
+ /** The name of the database within the entryContainer. */
+ private TreeName name;
+
+ AbstractDatabaseContainer(final TreeName name)
+ {
+ this.name = name;
+ }
+
+ @Override
+ public final void open(WriteableTransaction txn) throws StorageRuntimeException
+ {
+ txn.openTree(name);
+ doOpen(txn);
+ }
+
+ /**
+   * Override in order to perform any additional initialization after the tree has been opened.
+ */
+ void doOpen(WriteableTransaction txn) throws StorageRuntimeException
+ {
+ // Do nothing by default.
+ }
+
+ @Override
+ public final void delete(WriteableTransaction txn) throws StorageRuntimeException
+ {
+ txn.deleteTree(name);
+ }
+
+ @Override
+ public final long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException
+ {
+ return txn.getRecordCount(name);
+ }
+
+ @Override
+ public final TreeName getName()
+ {
+ return name;
+ }
+
+ @Override
+ public final void setName(TreeName name)
+ {
+ this.name = name;
+ }
+
+ @Override
+ public final String toString()
+ {
+ return name.toString();
+ }
+}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndex.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndex.java
index 122a64f..a151ad9 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndex.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndex.java
@@ -41,10 +41,13 @@
import org.forgerock.opendj.config.server.ConfigChangeResult;
import org.forgerock.opendj.config.server.ConfigException;
import org.forgerock.opendj.ldap.Assertion;
+import org.forgerock.opendj.ldap.ByteSequence;
import org.forgerock.opendj.ldap.ByteString;
import org.forgerock.opendj.ldap.DecodeException;
import org.forgerock.opendj.ldap.schema.MatchingRule;
+import org.forgerock.opendj.ldap.schema.Schema;
import org.forgerock.opendj.ldap.spi.IndexQueryFactory;
+import org.forgerock.opendj.ldap.spi.Indexer;
import org.forgerock.opendj.ldap.spi.IndexingOptions;
import org.opends.server.admin.server.ConfigurationChangeListener;
import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType;
@@ -105,6 +108,121 @@
}
}
+ /**
+   * An index storing the keys generated for this attribute by a matching rule's {@link Indexer}.
+ */
+ final class MatchingRuleIndex extends DefaultIndex
+ {
+ /**
+ * The matching rule's indexer.
+ */
+ private final Indexer indexer;
+
+ MatchingRuleIndex(WriteableTransaction txn, BackendIndexCfg cfg, Indexer indexer)
+ {
+ super(getIndexName(attributeType, indexer.getIndexID()), state, cfg.getIndexEntryLimit(), false, txn,
+ entryContainer);
+ this.indexer = indexer;
+ }
+
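+    /**
+     * Generates the index keys for the entry's values of this attribute and adds them to the
+     * provided set.
+     */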
+ void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options)
+ {
+ List<Attribute> attributes = entry.getAttribute(attributeType, true);
+ if (attributes != null)
+ {
+ indexAttribute(attributes, keys, options);
+ }
+ }
+
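+    /**
+     * Computes the index keys affected by a modification: keys present only in the old entry map
+     * to {@code false} (to be removed), keys present only in the new entry map to {@code true}
+     * (to be added), and keys present in both entries are left out of the map.
+     */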
+ void modifyEntry(Entry oldEntry, Entry newEntry, List<Modification> mods, Map<ByteString, Boolean> modifiedKeys,
+ IndexingOptions options)
+ {
+ List<Attribute> oldAttributes = oldEntry.getAttribute(attributeType, true);
+ if (oldAttributes != null)
+ {
+ final Set<ByteString> keys = new HashSet<ByteString>();
+ indexAttribute(oldAttributes, keys, options);
+ for (ByteString key : keys)
+ {
+ modifiedKeys.put(key, false);
+ }
+ }
+
+ List<Attribute> newAttributes = newEntry.getAttribute(attributeType, true);
+ if (newAttributes != null)
+ {
+ final Set<ByteString> keys = new HashSet<ByteString>();
+ indexAttribute(newAttributes, keys, options);
+ for (ByteString key : keys)
+ {
+ final Boolean needsAdding = modifiedKeys.get(key);
+ if (needsAdding == null)
+ {
+ // This value has been added.
+ modifiedKeys.put(key, true);
+ }
+ else if (!needsAdding)
+ {
+            // The key is present in both the old and new entries, so no update is needed.
+ modifiedKeys.remove(key);
+ }
+ }
+ }
+ }
+
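+    /**
+     * Generates index keys for every non-virtual value of the given attributes; values that
+     * cannot be decoded are skipped.
+     */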
+ private void indexAttribute(List<Attribute> attributes, Set<ByteString> keys, IndexingOptions options)
+ {
+ for (Attribute attr : attributes)
+ {
+ if (!attr.isVirtual())
+ {
+ for (ByteString value : attr)
+ {
+ try
+ {
+ indexer.createKeys(Schema.getDefaultSchema(), value, options, keys);
+
+ /*
+ * Optimization for presence: return immediately after first value since all values
+ * have the same key.
+ */
+ if (indexer == PRESENCE_INDEXER)
+ {
+ return;
+ }
+ }
+ catch (DecodeException e)
+ {
+ logger.traceException(e);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /** The key bytes used for the presence index as a {@link ByteString}. */
+ static final ByteString PRESENCE_KEY = ByteString.valueOf("+");
+
+ /**
+ * A special indexer for generating presence indexes.
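+   * Every attribute value maps to the single key {@link #PRESENCE_KEY}, so an entry contributes
+   * at most one presence key.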
+ */
+ private static final Indexer PRESENCE_INDEXER = new Indexer()
+ {
+ @Override
+ public void createKeys(Schema schema, ByteSequence value, IndexingOptions options, Collection<ByteString> keys)
+ throws DecodeException
+ {
+ keys.add(PRESENCE_KEY);
+ }
+
+ @Override
+ public String getIndexID()
+ {
+ return IndexType.PRESENCE.toString();
+ }
+ };
+
/*
* FIXME Matthew Swift: Once the matching rules have been migrated we should
* revisit this class. All of the evaluateXXX methods should go (the Matcher
@@ -115,30 +233,28 @@
private final EntryContainer entryContainer;
/** The attribute index configuration. */
- private BackendIndexCfg indexConfig;
+ private BackendIndexCfg config;
/** The mapping from names to indexes. */
- private final Map<String, Index> nameToIndexes = new HashMap<String, Index>();
+ private final Map<String, MatchingRuleIndex> nameToIndexes = new HashMap<String, MatchingRuleIndex>();
private final IndexingOptions indexingOptions;
+ private final State state;
+
+ /** The attribute type for which this instance will generate index keys. */
+ private final AttributeType attributeType;
/**
* The mapping from extensible index types (e.g. "substring" or "shared") to list of indexes.
*/
- private Map<String, Collection<Index>> extensibleIndexesMapping;
+ private Map<String, Collection<MatchingRuleIndex>> extensibleIndexesMapping;
- /**
- * Create a new attribute index object.
- *
- * @param indexConfig The attribute index configuration.
- * @param entryContainer The entryContainer of this attribute index.
- * @param txn a non null database transaction
- * @throws ConfigException if a configuration related error occurs.
- */
- AttributeIndex(BackendIndexCfg indexConfig, EntryContainer entryContainer, WriteableTransaction txn)
+ AttributeIndex(BackendIndexCfg config, State state, EntryContainer entryContainer, WriteableTransaction txn)
throws ConfigException
{
this.entryContainer = entryContainer;
- this.indexConfig = indexConfig;
+ this.config = config;
+ this.state = state;
+ this.attributeType = config.getAttribute();
buildPresenceIndex(txn);
buildIndexes(txn, IndexType.EQUALITY);
@@ -147,35 +263,27 @@
buildIndexes(txn, IndexType.APPROXIMATE);
buildExtensibleIndexes(txn);
- indexingOptions = new JEIndexConfig(indexConfig.getSubstringLength());
+ indexingOptions = new IndexingOptionsImpl(config.getSubstringLength());
extensibleIndexesMapping = computeExtensibleIndexesMapping();
}
private void buildPresenceIndex(WriteableTransaction txn)
{
final IndexType indexType = IndexType.PRESENCE;
- if (indexConfig.getIndexType().contains(indexType))
+ if (config.getIndexType().contains(indexType))
{
String indexID = indexType.toString();
- nameToIndexes.put(indexID, newPresenceIndex(txn, indexConfig));
+ nameToIndexes.put(indexID, new MatchingRuleIndex(txn, config, PRESENCE_INDEXER));
}
}
- private Index newPresenceIndex(WriteableTransaction txn, BackendIndexCfg cfg)
- {
- final AttributeType attrType = cfg.getAttribute();
- final TreeName indexName = getIndexName(attrType, IndexType.PRESENCE.toString());
- final PresenceIndexer indexer = new PresenceIndexer(attrType);
- return entryContainer.newIndexForAttribute(txn, indexName, indexer, cfg.getIndexEntryLimit());
- }
-
private void buildExtensibleIndexes(WriteableTransaction txn) throws ConfigException
{
final IndexType indexType = IndexType.EXTENSIBLE;
- if (indexConfig.getIndexType().contains(indexType))
+ if (config.getIndexType().contains(indexType))
{
- final AttributeType attrType = indexConfig.getAttribute();
- Set<String> extensibleRules = indexConfig.getIndexExtensibleMatchingRule();
+ final AttributeType attrType = config.getAttribute();
+ Set<String> extensibleRules = config.getIndexExtensibleMatchingRule();
if (extensibleRules == null || extensibleRules.isEmpty())
{
throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attrType, indexType.toString()));
@@ -193,13 +301,13 @@
logger.error(ERR_CONFIG_INDEX_TYPE_NEEDS_VALID_MATCHING_RULE, attrType, ruleName);
continue;
}
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers())
+ for (Indexer indexer : rule.getIndexers())
{
final String indexId = indexer.getIndexID();
if (!nameToIndexes.containsKey(indexId))
{
// There is no index available for this index id. Create a new index
- nameToIndexes.put(indexId, newAttributeIndex(txn, indexConfig, indexer));
+ nameToIndexes.put(indexId, new MatchingRuleIndex(txn, config, indexer));
}
}
}
@@ -208,9 +316,9 @@
private void buildIndexes(WriteableTransaction txn, IndexType indexType) throws ConfigException
{
- if (indexConfig.getIndexType().contains(indexType))
+ if (config.getIndexType().contains(indexType))
{
- final AttributeType attrType = indexConfig.getAttribute();
+ final AttributeType attrType = config.getAttribute();
final String indexID = indexType.toString();
final MatchingRule rule = getMatchingRule(indexType, attrType);
if (rule == null)
@@ -218,9 +326,9 @@
throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attrType, indexID));
}
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers())
+ for (Indexer indexer : rule.getIndexers())
{
- nameToIndexes.put(indexID, newAttributeIndex(txn, indexConfig, indexer));
+ nameToIndexes.put(indexID, new MatchingRuleIndex(txn, config, indexer));
}
}
}
@@ -242,15 +350,6 @@
}
}
- private Index newAttributeIndex(WriteableTransaction txn, BackendIndexCfg indexConfig,
- org.forgerock.opendj.ldap.spi.Indexer indexer)
- {
- final AttributeType attrType = indexConfig.getAttribute();
- final TreeName indexName = getIndexName(attrType, indexer.getIndexID());
- final AttributeIndexer attrIndexer = new AttributeIndexer(attrType, indexer);
- return entryContainer.newIndexForAttribute(txn, indexName, attrIndexer, indexConfig.getIndexEntryLimit());
- }
-
private TreeName getIndexName(AttributeType attrType, String indexID)
{
final String attrIndexId = attrType.getNameOrOID() + "." + indexID;
@@ -269,13 +368,13 @@
{
index.open(txn);
}
- indexConfig.addChangeListener(this);
+ config.addChangeListener(this);
}
@Override
public void close()
{
- indexConfig.removeChangeListener(this);
+ config.removeChangeListener(this);
}
/**
@@ -284,7 +383,7 @@
*/
AttributeType getAttributeType()
{
- return indexConfig.getAttribute();
+ return config.getAttribute();
}
/**
@@ -303,7 +402,7 @@
*/
BackendIndexCfg getConfiguration()
{
- return indexConfig;
+ return config;
}
/**
@@ -318,9 +417,14 @@
void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry)
throws StorageRuntimeException, DirectoryException
{
- for (Index index : nameToIndexes.values())
+ for (MatchingRuleIndex index : nameToIndexes.values())
{
- index.addEntry(buffer, entryID, entry, indexingOptions);
+ HashSet<ByteString> keys = new HashSet<ByteString>();
+ index.indexEntry(entry, keys, indexingOptions);
+ for (ByteString key : keys)
+ {
+ buffer.put(index, key, entryID);
+ }
}
}
@@ -336,9 +440,14 @@
void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry)
throws StorageRuntimeException, DirectoryException
{
- for (Index index : nameToIndexes.values())
+ for (MatchingRuleIndex index : nameToIndexes.values())
{
- index.removeEntry(buffer, entryID, entry, indexingOptions);
+ HashSet<ByteString> keys = new HashSet<ByteString>();
+ index.indexEntry(entry, keys, indexingOptions);
+ for (ByteString key : keys)
+ {
+ buffer.remove(index, key, entryID);
+ }
}
}
@@ -361,9 +470,21 @@
List<Modification> mods)
throws StorageRuntimeException
{
- for (Index index : nameToIndexes.values())
+ for (MatchingRuleIndex index : nameToIndexes.values())
{
- index.modifyEntry(buffer, entryID, oldEntry, newEntry, mods, indexingOptions);
+ TreeMap<ByteString, Boolean> modifiedKeys = new TreeMap<ByteString, Boolean>();
+ index.modifyEntry(oldEntry, newEntry, mods, modifiedKeys, indexingOptions);
+ for (Map.Entry<ByteString, Boolean> modifiedKey : modifiedKeys.entrySet())
+ {
+ if (modifiedKey.getValue())
+ {
+ buffer.put(index, modifiedKey.getKey(), entryID);
+ }
+ else
+ {
+ buffer.remove(index, modifiedKey.getKey(), entryID);
+ }
+ }
}
}
@@ -385,7 +506,7 @@
// concurrent writers.
Set<ByteString> set = new HashSet<ByteString>();
- int substrLength = indexConfig.getSubstringLength();
+ int substrLength = config.getSubstringLength();
// Example: The value is ABCDE and the substring length is 3.
// We produce the keys ABC BCD CDE DE E
@@ -426,7 +547,7 @@
if (debugBuffer != null)
{
- debugBuffer.append("[INDEX:").append(indexConfig.getAttribute().getNameOrOID())
+ debugBuffer.append("[INDEX:").append(config.getAttribute().getNameOrOID())
.append(".").append(indexName).append("]");
}
@@ -560,23 +681,6 @@
}
/**
- * Return the number of values that have exceeded the entry limit since this
- * object was created.
- *
- * @return The number of values that have exceeded the entry limit.
- */
- long getEntryLimitExceededCount()
- {
- long entryLimitExceededCount = 0;
-
- for (Index index : nameToIndexes.values())
- {
- entryLimitExceededCount += index.getEntryLimitExceededCount();
- }
- return entryLimitExceededCount;
- }
-
- /**
* Get a list of the databases opened by this attribute index.
* @param dbList A list of database containers.
*/
@@ -658,7 +762,7 @@
});
extensibleIndexesMapping = computeExtensibleIndexesMapping();
- indexConfig = cfg;
+ config = cfg;
}
catch(Exception e)
{
@@ -693,13 +797,13 @@
continue;
}
validRules.add(rule);
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers())
+ for (Indexer indexer : rule.getIndexers())
{
String indexId = indexer.getIndexID();
validIndexIds.add(indexId);
if (!nameToIndexes.containsKey(indexId))
{
- Index index = newAttributeIndex(txn, cfg, indexer);
+ MatchingRuleIndex index = new MatchingRuleIndex(txn, cfg, indexer);
openIndex(txn, index, ccr);
nameToIndexes.put(indexId, index);
}
@@ -711,10 +815,6 @@
ccr.setAdminActionRequired(true);
ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(index.getName()));
}
- if (indexConfig.getSubstringLength() != cfg.getSubstringLength())
- {
- index.setIndexer(new AttributeIndexer(attrType, indexer));
- }
}
}
}
@@ -735,7 +835,7 @@
for (MatchingRule rule: rulesToDelete)
{
final List<String> indexIdsToRemove = new ArrayList<String>();
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers())
+ for (Indexer indexer : rule.getIndexers())
{
final String indexId = indexer.getIndexID();
if (!validIndexIds.contains(indexId))
@@ -765,7 +865,7 @@
private Set<MatchingRule> getCurrentExtensibleMatchingRules()
{
final Set<MatchingRule> rules = new HashSet<MatchingRule>();
- for (String ruleName : indexConfig.getIndexExtensibleMatchingRule())
+ for (String ruleName : config.getIndexExtensibleMatchingRule())
{
final MatchingRule rule = DirectoryServer.getMatchingRule(toLowerCase(ruleName));
if (rule != null)
@@ -780,7 +880,7 @@
final ConfigChangeResult ccr)
{
String indexId = indexType.toString();
- Index index = nameToIndexes.get(indexId);
+ MatchingRuleIndex index = nameToIndexes.get(indexId);
if (!cfg.getIndexType().contains(indexType))
{
removeIndex(txn, index, indexType);
@@ -790,9 +890,9 @@
if (index == null)
{
final MatchingRule matchingRule = getMatchingRule(indexType, cfg.getAttribute());
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : matchingRule.getIndexers())
+ for (Indexer indexer : matchingRule.getIndexers())
{
- index = newAttributeIndex(txn, cfg, indexer);
+ index = new MatchingRuleIndex(txn, cfg, indexer);
openIndex(txn, index, ccr);
nameToIndexes.put(indexId, index);
}
@@ -805,6 +905,12 @@
ccr.setAdminActionRequired(true);
ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(index.getName()));
}
+ if (indexType == IndexType.SUBSTRING && config.getSubstringLength() != cfg.getSubstringLength())
+ {
+ ccr.setAdminActionRequired(true);
+ // FIXME: msg?
+ ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(index.getName()));
+ }
}
}
@@ -812,7 +918,7 @@
{
final IndexType indexType = IndexType.PRESENCE;
final String indexID = indexType.toString();
- Index index = nameToIndexes.get(indexID);
+ MatchingRuleIndex index = nameToIndexes.get(indexID);
if (!cfg.getIndexType().contains(indexType))
{
removeIndex(txn, index, indexType);
@@ -821,7 +927,7 @@
if (index == null)
{
- index = newPresenceIndex(txn, cfg);
+ index = new MatchingRuleIndex(txn, cfg, PRESENCE_INDEXER);
openIndex(txn, index, ccr);
nameToIndexes.put(indexID, index);
}
@@ -889,7 +995,7 @@
{
return entryContainer.getDatabasePrefix()
+ "_"
- + indexConfig.getAttribute().getNameOrOID();
+ + config.getAttribute().getNameOrOID();
}
/**
@@ -897,7 +1003,7 @@
*
* @return The equality index.
*/
- Index getEqualityIndex()
+ MatchingRuleIndex getEqualityIndex()
{
return getIndexById(IndexType.EQUALITY.toString());
}
@@ -907,7 +1013,7 @@
*
* @return The approximate index.
*/
- Index getApproximateIndex()
+ MatchingRuleIndex getApproximateIndex()
{
return getIndexById(IndexType.APPROXIMATE.toString());
}
@@ -917,7 +1023,7 @@
*
* @return The ordering index.
*/
- Index getOrderingIndex()
+ MatchingRuleIndex getOrderingIndex()
{
return getIndexById(IndexType.ORDERING.toString());
}
@@ -927,7 +1033,7 @@
*
* @return The substring index.
*/
- Index getSubstringIndex()
+ MatchingRuleIndex getSubstringIndex()
{
return getIndexById(IndexType.SUBSTRING.toString());
}
@@ -937,7 +1043,7 @@
*
* @return The presence index.
*/
- Index getPresenceIndex()
+ MatchingRuleIndex getPresenceIndex()
{
return getIndexById(IndexType.PRESENCE.toString());
}
@@ -953,7 +1059,7 @@
* @return The index identified by the provided identifier, or null if no such
* index exists
*/
- Index getIndexById(String indexId)
+ MatchingRuleIndex getIndexById(String indexId)
{
return nameToIndexes.get(indexId);
}
@@ -963,16 +1069,16 @@
*
* @return The map containing entries (extensible index type, list of indexes)
*/
- Map<String, Collection<Index>> getExtensibleIndexes()
+ Map<String, Collection<MatchingRuleIndex>> getExtensibleIndexes()
{
return extensibleIndexesMapping;
}
- private Map<String, Collection<Index>> computeExtensibleIndexesMapping()
+ private Map<String, Collection<MatchingRuleIndex>> computeExtensibleIndexesMapping()
{
- final Collection<Index> substring = new ArrayList<Index>();
- final Collection<Index> shared = new ArrayList<Index>();
- for (Map.Entry<String, Index> entry : nameToIndexes.entrySet())
+ final Collection<MatchingRuleIndex> substring = new ArrayList<MatchingRuleIndex>();
+ final Collection<MatchingRuleIndex> shared = new ArrayList<MatchingRuleIndex>();
+ for (Map.Entry<String, MatchingRuleIndex> entry : nameToIndexes.entrySet())
{
final String indexId = entry.getKey();
if (isDefaultIndex(indexId)) {
@@ -987,7 +1093,7 @@
shared.add(entry.getValue());
}
}
- final Map<String, Collection<Index>> indexMap = new HashMap<String,Collection<Index>>();
+ final Map<String, Collection<MatchingRuleIndex>> indexMap = new HashMap<String, Collection<MatchingRuleIndex>>();
indexMap.put(EXTENSIBLE_INDEXER_ID_SUBSTRING, substring);
indexMap.put(EXTENSIBLE_INDEXER_ID_SHARED, shared);
return Collections.unmodifiableMap(indexMap);
@@ -1037,7 +1143,7 @@
* 1. There is no matching rule provided
* 2. The matching rule specified is actually the default equality.
*/
- MatchingRule eqRule = indexConfig.getAttribute().getEqualityMatchingRule();
+ MatchingRule eqRule = config.getAttribute().getEqualityMatchingRule();
if (matchRuleOID == null
|| matchRuleOID.equals(eqRule.getOID())
|| matchRuleOID.equalsIgnoreCase(eqRule.getNameOrOID()))
@@ -1047,12 +1153,12 @@
}
MatchingRule rule = DirectoryServer.getMatchingRule(matchRuleOID);
- if (!ruleHasAtLeasOneIndex(rule))
+ if (!ruleHasAtLeastOneIndex(rule))
{
if (monitor.isFilterUseEnabled())
{
monitor.updateStats(filter, INFO_JEB_INDEX_FILTER_MATCHING_RULE_NOT_INDEXED.get(
- matchRuleOID, indexConfig.getAttribute().getNameOrOID()));
+ matchRuleOID, config.getAttribute().getNameOrOID()));
}
return IndexQuery.createNullIndexQuery().evaluate(null);
}
@@ -1062,7 +1168,7 @@
if (debugBuffer != null)
{
debugBuffer.append("[INDEX:");
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers())
+ for (Indexer indexer : rule.getIndexers())
{
debugBuffer.append(" ")
.append(filter.getAttributeType().getNameOrOID())
@@ -1095,9 +1201,9 @@
}
}
- private boolean ruleHasAtLeasOneIndex(MatchingRule rule)
+ private boolean ruleHasAtLeastOneIndex(MatchingRule rule)
{
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers())
+ for (Indexer indexer : rule.getIndexers())
{
if (nameToIndexes.containsKey(indexer.getIndexID()))
{
@@ -1107,26 +1213,31 @@
return false;
}
- /** This class extends the IndexConfig for JE Backend. */
- private final class JEIndexConfig implements IndexingOptions
+ /** Indexing options implementation. */
+ private final class IndexingOptionsImpl implements IndexingOptions
{
- /** The length of the substring index. */
- private int substringLength;
+ /** The length of substring keys used in substring indexes. */
+ private int substringKeySize;
- /**
- * Creates a new JEIndexConfig instance.
- * @param substringLength The length of the substring.
- */
- private JEIndexConfig(int substringLength)
+ private IndexingOptionsImpl(int substringKeySize)
{
- this.substringLength = substringLength;
+ this.substringKeySize = substringKeySize;
}
- /** {@inheritDoc} */
@Override
public int substringKeySize()
{
- return substringLength;
+ return substringKeySize;
+ }
+ }
+
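+  /**
+   * Closes this attribute index and deletes all of its underlying index trees along with their
+   * state records.
+   */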
+ void closeAndDelete(WriteableTransaction txn)
+ {
+ close();
+ for (Index index : nameToIndexes.values())
+ {
+ index.delete(txn);
+ state.deleteRecord(txn, index.getName());
}
}
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndexer.java
deleted file mode 100644
index b04f244..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AttributeIndexer.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2009-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.pluggable;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.schema.Schema;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.opends.server.types.Attribute;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * This class implements an attribute indexer for matching rules in JE Backend.
- */
-final class AttributeIndexer extends Indexer
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The attribute type for which this instance will generate index keys. */
- private final AttributeType attributeType;
-
- /**
- * The indexer which will generate the keys
- * for the associated extensible matching rule.
- */
- private final org.forgerock.opendj.ldap.spi.Indexer indexer;
-
- /**
- * Creates a new extensible indexer for JE backend.
- *
- * @param attributeType The attribute type for which an indexer is
- * required.
- * @param extensibleIndexer The extensible indexer to be used.
- */
- AttributeIndexer(AttributeType attributeType,
- org.forgerock.opendj.ldap.spi.Indexer extensibleIndexer)
- {
- this.attributeType = attributeType;
- this.indexer = extensibleIndexer;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return attributeType.getNameOrOID() + "." + indexer.getIndexID();
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options)
- {
- List<Attribute> attrList = entry.getAttribute(attributeType);
- if (attrList != null)
- {
- indexAttribute(attrList, keys, options);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods, Map<ByteString, Boolean> modifiedKeys,
- IndexingOptions options)
- {
- List<Attribute> newAttributes = newEntry.getAttribute(attributeType, true);
- List<Attribute> oldAttributes = oldEntry.getAttribute(attributeType, true);
-
- indexAttribute(oldAttributes, modifiedKeys, false, options);
- indexAttribute(newAttributes, modifiedKeys, true, options);
- }
-
-
-
- /**
- * Generates the set of extensible index keys for an attribute.
- * @param attrList The attribute for which substring keys are required.
- * @param keys The set into which the generated keys will be inserted.
- */
- private void indexAttribute(List<Attribute> attrList, Set<ByteString> keys,
- IndexingOptions options)
- {
- if (attrList == null)
- {
- return;
- }
-
- for (Attribute attr : attrList)
- {
- if (!attr.isVirtual())
- {
- for (ByteString value : attr)
- {
- try
- {
- indexer.createKeys(Schema.getDefaultSchema(), value, options, keys);
- }
- catch (DecodeException e)
- {
- logger.traceException(e);
- }
- }
- }
- }
- }
-
- /**
- * Generates the set of index keys for an attribute.
- * @param attrList The attribute to be indexed.
- * @param modifiedKeys The map into which the modified
- * keys will be inserted.
- * @param insert <code>true</code> if generated keys should
- * be inserted or <code>false</code> otherwise.
- */
- private void indexAttribute(List<Attribute> attrList,
- Map<ByteString, Boolean> modifiedKeys, Boolean insert,
- IndexingOptions options)
- {
- if (attrList == null)
- {
- return;
- }
-
- final Set<ByteString> keys = new HashSet<ByteString>();
- indexAttribute(attrList, keys, options);
- computeModifiedKeys(modifiedKeys, insert, keys);
- }
-
- /**
- * Computes a map of index keys and a boolean flag indicating whether the
- * corresponding key will be inserted or deleted.
- *
- * @param modifiedKeys
- * A map containing the keys and a boolean. Keys corresponding to the
- * boolean value <code>true</code> should be inserted and
- * <code>false</code> should be deleted.
- * @param insert
- * <code>true</code> if generated keys should be inserted or
- * <code>false</code> otherwise.
- * @param keys
- * The index keys to map.
- */
- private static void computeModifiedKeys(Map<ByteString, Boolean> modifiedKeys,
- Boolean insert, Set<ByteString> keys)
- {
- for (ByteString key : keys)
- {
- Boolean cInsert = modifiedKeys.get(key);
- if (cInsert == null)
- {
- modifiedKeys.put(key, insert);
- }
- else if (!cInsert.equals(insert))
- {
- modifiedKeys.remove(key);
- }
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2ID.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2ID.java
index c62c888..917c324 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2ID.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2ID.java
@@ -40,7 +40,7 @@
* for each entry. The key is the normalized entry DN and the value
* is the entry ID.
*/
-class DN2ID extends DatabaseContainer
+class DN2ID extends AbstractDatabaseContainer
{
private final int prefixRDNComponents;
@@ -54,7 +54,7 @@
DN2ID(TreeName treeName, EntryContainer entryContainer) throws StorageRuntimeException
{
super(treeName);
- prefixRDNComponents = entryContainer.getBaseDN().size();
+ this.prefixRDNComponents = entryContainer.getBaseDN().size();
}
/**
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2URI.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2URI.java
index 200d24e..4765ddf 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2URI.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DN2URI.java
@@ -74,7 +74,7 @@
* as in the DN database so that all referrals in a subtree can be retrieved by
* cursoring through a range of the records.
*/
-class DN2URI extends DatabaseContainer
+class DN2URI extends AbstractDatabaseContainer
{
private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseContainer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseContainer.java
index 2698dc3..3cba1eb 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseContainer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseContainer.java
@@ -35,21 +35,8 @@
* This class is a wrapper around the tree object and provides basic
* read and write methods for entries.
*/
-abstract class DatabaseContainer
+interface DatabaseContainer
{
- /** The name of the database within the entryContainer. */
- private TreeName name;
-
- /**
- * Create a new DatabaseContainer object.
- *
- * @param treeName The name of the entry database.
- */
- DatabaseContainer(TreeName treeName)
- {
- this.name = treeName;
- }
-
/**
* Opens a database in this database container. If the provided database configuration is
* transactional, a transaction will be created and used to perform the open.
@@ -59,10 +46,7 @@
* @throws StorageRuntimeException
* if a database error occurs while opening the index.
*/
- void open(WriteableTransaction txn) throws StorageRuntimeException
- {
- txn.openTree(name);
- }
+ void open(WriteableTransaction txn) throws StorageRuntimeException;
/**
* Deletes this database and all of its contents.
@@ -72,10 +56,7 @@
* @throws StorageRuntimeException
* if a database error occurs while deleting the index.
*/
- void delete(WriteableTransaction txn) throws StorageRuntimeException
- {
- txn.deleteTree(name);
- }
+ void delete(WriteableTransaction txn) throws StorageRuntimeException;
/**
* Returns the number of key/value pairs in this database container.
@@ -86,39 +67,26 @@
* @throws StorageRuntimeException
* If an error occurs in the DB operation.
*/
- long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException
- {
- return txn.getRecordCount(name);
- }
+ long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException;
/**
* Get a string representation of this object.
*
* @return return A string representation of this object.
*/
- @Override
- public String toString()
- {
- return name.toString();
- }
+ String toString();
/**
* Get the database name for this database container.
*
* @return database name for this database container.
*/
- final TreeName getName()
- {
- return name;
- }
+ TreeName getName();
/**
* Set the database name to use for this container.
*
* @param name The database name to use for this container.
*/
- final void setName(TreeName name)
- {
- this.name = name;
- }
+ void setName(TreeName name);
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseEnvironmentMonitor.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseEnvironmentMonitor.java
index beecebf..566b495 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseEnvironmentMonitor.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DatabaseEnvironmentMonitor.java
@@ -196,9 +196,7 @@
AttributeBuilder needReindex = new AttributeBuilder("need-reindex");
for(EntryContainer ec : rootContainer.getEntryContainers())
{
- List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>();
- ec.listDatabases(databases);
- for(DatabaseContainer dc : databases)
+ for(DatabaseContainer dc : ec.listDatabases())
{
if(dc instanceof Index && !((Index)dc).isTrusted())
{
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DefaultIndex.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DefaultIndex.java
new file mode 100644
index 0000000..83bf6ec
--- /dev/null
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/DefaultIndex.java
@@ -0,0 +1,379 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
+ * or http://forgerock.org/license/CDDLv1.0.html.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at legal-notices/CDDLv1_0.txt.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information:
+ * Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2006-2010 Sun Microsystems, Inc.
+ * Portions Copyright 2012-2015 ForgeRock AS
+ */
+package org.opends.server.backends.pluggable;
+
+import static org.forgerock.util.Reject.checkNotNull;
+import static org.opends.messages.JebMessages.ERR_JEB_INDEX_CORRUPT_REQUIRES_REBUILD;
+import static org.opends.server.backends.pluggable.EntryIDSet.*;
+import static org.opends.server.backends.pluggable.State.IndexFlag.COMPACTED;
+import static org.opends.server.backends.pluggable.State.IndexFlag.TRUSTED;
+
+import java.util.EnumSet;
+
+import org.forgerock.i18n.slf4j.LocalizedLogger;
+import org.forgerock.opendj.ldap.ByteSequence;
+import org.forgerock.opendj.ldap.ByteString;
+import org.forgerock.util.promise.NeverThrowsException;
+import org.opends.server.backends.pluggable.CursorTransformer.ValueTransformer;
+import org.opends.server.backends.pluggable.EntryIDSet.EntryIDSetCodec;
+import org.opends.server.backends.pluggable.State.IndexFlag;
+import org.opends.server.backends.pluggable.spi.Cursor;
+import org.opends.server.backends.pluggable.spi.ReadableTransaction;
+import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
+import org.opends.server.backends.pluggable.spi.TreeName;
+import org.opends.server.backends.pluggable.spi.UpdateFunction;
+import org.opends.server.backends.pluggable.spi.WriteableTransaction;
+import org.opends.server.util.StaticUtils;
+
+/**
+ * Represents an index implemented by a tree in which each key maps to a set of entry IDs. The key
+ * is a byte array, and is constructed from some normalized form of an attribute value (or fragment
+ * of a value) appearing in the entry.
+ */
+class DefaultIndex extends AbstractDatabaseContainer implements Index
+{
+ private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
+
+ /** The limit on the number of entry IDs that may be indexed by one key. */
+ private int indexEntryLimit;
+ /**
+   * Whether to maintain a count of IDs for a key once the entry limit has been exceeded.
+ */
+ private final boolean maintainCount;
+
+ private final State state;
+
+ private final EntryIDSetCodec codec;
+
+ /**
+ * A flag to indicate if this index should be trusted to be consistent with the entries database.
+   * If not trusted, we assume that existing entryIDSets for a key are still accurate. However, keys
+   * that do not exist are undefined instead of an empty entryIDSet. The following rules will be
+   * observed when the index is not trusted:
+   * <ul>
+   * <li>no entryIDs will be added to a non-existing key;</li>
+   * <li>an undefined entryIDSet will be returned whenever a key is not found.</li>
+   * </ul>
+ */
+ private volatile boolean trusted;
+
+ /**
+ * Create a new index object.
+ *
+ * @param name
+ * The name of the index database within the entryContainer.
+ * @param state
+ * The state database to persist index state info.
+ * @param indexEntryLimit
+ * The configured limit on the number of entry IDs that may be indexed by one key.
+ * @param maintainCount
+   *          Whether to maintain a count of IDs for a key once the entry limit has been exceeded.
+ * @param txn
+ * a non null database transaction
+ * @param entryContainer
+ * The database entryContainer holding this index.
+ * @throws StorageRuntimeException
+ * If an error occurs in the database.
+ */
+ DefaultIndex(TreeName name, State state, int indexEntryLimit, boolean maintainCount, WriteableTransaction txn,
+ EntryContainer entryContainer) throws StorageRuntimeException
+ {
+ super(name);
+ this.indexEntryLimit = indexEntryLimit;
+ this.maintainCount = maintainCount;
+ this.state = state;
+
+ final EnumSet<IndexFlag> flags = state.getIndexFlags(txn, getName());
+ this.codec = flags.contains(COMPACTED) ? CODEC_V2 : CODEC_V1;
+ this.trusted = flags.contains(TRUSTED);
+ if (!trusted && entryContainer.getHighestEntryID(txn).longValue() == 0)
+ {
+ // If there are no entries in the entry container then there
+ // is no reason why this index can't be upgraded to trusted.
+ setTrusted(txn, true);
+ }
+ }
+
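+  /**
+   * Opens a cursor over this index in which stored values are decoded into {@link EntryIDSet}s
+   * using this index's codec.
+   */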
+ public final Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn)
+ {
+ checkNotNull(txn, "txn must not be null");
+ return CursorTransformer.transformValues(txn.openCursor(getName()),
+ new ValueTransformer<ByteString, ByteString, EntryIDSet, NeverThrowsException>()
+ {
+ @Override
+ public EntryIDSet transform(ByteString key, ByteString value) throws NeverThrowsException
+ {
+ return codec.decode(key, value);
+ }
+ });
+ }
+
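+  /**
+   * Merges the given set of imported entry IDs into the record for its key, creating the record
+   * when it does not yet exist.
+   */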
+ public final void importPut(WriteableTransaction txn, ImportIDSet idsToBeAdded) throws StorageRuntimeException
+ {
+ ByteSequence key = idsToBeAdded.getKey();
+ ByteString value = txn.read(getName(), key);
+ if (value != null)
+ {
+ final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount);
+ importIDSet.merge(idsToBeAdded);
+ txn.put(getName(), key, importIDSet.valueToByteString(codec));
+ }
+ else
+ {
+ txn.put(getName(), key, idsToBeAdded.valueToByteString(codec));
+ }
+ }
+
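+  /**
+   * Removes the given set of imported entry IDs from the record for its key, deleting the record
+   * when no IDs remain. The key is expected to already exist.
+   */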
+ public final void importRemove(WriteableTransaction txn, ImportIDSet idsToBeRemoved) throws StorageRuntimeException
+ {
+ ByteSequence key = idsToBeRemoved.getKey();
+ ByteString value = txn.read(getName(), key);
+ if (value != null)
+ {
+ final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount);
+ importIDSet.remove(idsToBeRemoved);
+ if (importIDSet.isDefined() && importIDSet.size() == 0)
+ {
+ txn.delete(getName(), key);
+ }
+ else
+ {
+ txn.put(getName(), key, importIDSet.valueToByteString(codec));
+ }
+ }
+ else
+ {
+ // Should never happen -- the keys should always be there.
+ throw new RuntimeException();
+ }
+ }
+
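+  /**
+   * Updates the entry ID set for the given key by removing {@code deletedIDs} and adding
+   * {@code addedIDs}. Passing {@code null} for both causes the record for the key to be deleted.
+   */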
+ public final void update(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs)
+ throws StorageRuntimeException
+ {
+ /*
+ * Check the special condition where both deletedIDs and addedIDs are null. This is used when
+ * deleting entries and corresponding id2children and id2subtree records must be completely
+ * removed.
+ */
+ if (deletedIDs == null && addedIDs == null)
+ {
+ boolean success = txn.delete(getName(), key);
+ if (success && logger.isTraceEnabled())
+ {
+ StringBuilder builder = new StringBuilder();
+ StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
+ logger.trace("The expected key does not exist in the index %s.\nKey:%s ", getName(), builder);
+ }
+ return;
+ }
+
+ // Handle cases where nothing is changed early to avoid DB access.
+ if (isNullOrEmpty(deletedIDs) && isNullOrEmpty(addedIDs))
+ {
+ return;
+ }
+
+ if (maintainCount)
+ {
+ update0(txn, key, deletedIDs, addedIDs);
+ }
+ else if (get(txn, key).isDefined())
+ {
+ /*
+ * Avoid taking a write lock on a record which has hit all IDs because it is likely to be a
+ * point of contention.
+ */
+ update0(txn, key, deletedIDs, addedIDs);
+ } // else the record exists but we've hit all IDs.
+ }
+
+ private boolean isNullOrEmpty(EntryIDSet entryIDSet)
+ {
+ return entryIDSet == null || entryIDSet.size() == 0;
+ }
+
+ private boolean isNotEmpty(EntryIDSet entryIDSet)
+ {
+ return entryIDSet != null && entryIDSet.size() > 0;
+ }
+
+ private void update0(final WriteableTransaction txn, final ByteString key, final EntryIDSet deletedIDs,
+ final EntryIDSet addedIDs) throws StorageRuntimeException
+ {
+ txn.update(getName(), key, new UpdateFunction()
+ {
+ @Override
+ public ByteSequence computeNewValue(final ByteSequence oldValue)
+ {
+ if (oldValue != null)
+ {
+ EntryIDSet entryIDSet = computeEntryIDSet(key, oldValue.toByteString(), deletedIDs, addedIDs);
+ ByteString after = codec.encode(entryIDSet);
+ /*
+ * If there are no more IDs then return null indicating that the record should be removed.
+             * If the index is not trusted then this will cause all subsequent reads for this key
+             * to return an undefined set.
+ */
+ return after.isEmpty() ? null : after;
+ }
+ else if (trusted)
+ {
+ if (deletedIDs != null)
+ {
+ logIndexCorruptError(txn, key);
+ }
+ if (isNotEmpty(addedIDs))
+ {
+ return codec.encode(addedIDs);
+ }
+ }
+ return null; // no change.
+ }
+ });
+ }
+
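+  /**
+   * Computes the new entry ID set for a key by applying the deleted and added IDs to the decoded
+   * value, replacing it with an undefined set once the index entry limit is reached.
+   */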
+ private EntryIDSet computeEntryIDSet(ByteString key, ByteString value, EntryIDSet deletedIDs, EntryIDSet addedIDs)
+ {
+ EntryIDSet entryIDSet = codec.decode(key, value);
+ if (addedIDs != null)
+ {
+ if (entryIDSet.isDefined() && indexEntryLimit > 0)
+ {
+ long idCountDelta = addedIDs.size();
+ if (deletedIDs != null)
+ {
+ idCountDelta -= deletedIDs.size();
+ }
+ if (idCountDelta + entryIDSet.size() >= indexEntryLimit)
+ {
+ if (maintainCount)
+ {
+ entryIDSet = newUndefinedSetWithSize(key, entryIDSet.size() + idCountDelta);
+ }
+ else
+ {
+ entryIDSet = newUndefinedSet();
+ }
+
+ if (logger.isTraceEnabled())
+ {
+ StringBuilder builder = new StringBuilder();
+ StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
+ logger.trace("Index entry exceeded in index %s. " + "Limit: %d. ID list size: %d.\nKey:%s", getName(),
+ indexEntryLimit, idCountDelta + addedIDs.size(), builder);
+
+ }
+ }
+ else
+ {
+ entryIDSet.addAll(addedIDs);
+ if (deletedIDs != null)
+ {
+ entryIDSet.removeAll(deletedIDs);
+ }
+ }
+ }
+ else
+ {
+ entryIDSet.addAll(addedIDs);
+ if (deletedIDs != null)
+ {
+ entryIDSet.removeAll(deletedIDs);
+ }
+ }
+ }
+ else if (deletedIDs != null)
+ {
+ entryIDSet.removeAll(deletedIDs);
+ }
+ return entryIDSet;
+ }
+
+ private void logIndexCorruptError(WriteableTransaction txn, ByteString key)
+ {
+ if (logger.isTraceEnabled())
+ {
+ StringBuilder builder = new StringBuilder();
+ StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
+ logger.trace("The expected key does not exist in the index %s.\nKey:%s", getName(), builder);
+ }
+
+ setTrusted(txn, false);
+ logger.error(ERR_JEB_INDEX_CORRUPT_REQUIRES_REBUILD, getName());
+ }
+
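+  /**
+   * Reads the entry ID set for the given key. Returns an empty defined set when the key is absent
+   * and the index is trusted, and an undefined set when the index is untrusted or the read fails.
+   */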
+ public final EntryIDSet get(ReadableTransaction txn, ByteSequence key)
+ {
+ try
+ {
+ ByteString value = txn.read(getName(), key);
+ if (value != null)
+ {
+ return codec.decode(key, value);
+ }
+ return trusted ? newDefinedSet() : newUndefinedSet();
+ }
+ catch (StorageRuntimeException e)
+ {
+ logger.traceException(e);
+ return newUndefinedSet();
+ }
+ }
+
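+  /**
+   * Sets a new index entry limit, returning {@code true} if the index now requires a rebuild
+   * because the limit has been increased.
+   */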
+ public final boolean setIndexEntryLimit(int indexEntryLimit)
+ {
+ final boolean rebuildRequired = this.indexEntryLimit < indexEntryLimit;
+ this.indexEntryLimit = indexEntryLimit;
+ return rebuildRequired;
+ }
+
+ public final int getIndexEntryLimit()
+ {
+ return indexEntryLimit;
+ }
+
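+  /**
+   * Sets the trusted state of this index and persists the corresponding flag in the state
+   * database.
+   */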
+ public final synchronized void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException
+ {
+ this.trusted = trusted;
+ if (trusted)
+ {
+ state.addFlagsToIndex(txn, getName(), TRUSTED);
+ }
+ else
+ {
+ state.removeFlagsFromIndex(txn, getName(), TRUSTED);
+ }
+ }
+
+ public final boolean isTrusted()
+ {
+ return trusted;
+ }
+
+ public final boolean getMaintainCount()
+ {
+ return maintainCount;
+ }
+}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryContainer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryContainer.java
index 7c2743c..cd46a51 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryContainer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryContainer.java
@@ -73,6 +73,7 @@
import org.opends.server.api.VirtualAttributeProvider;
import org.opends.server.api.plugin.PluginResult.SubordinateDelete;
import org.opends.server.api.plugin.PluginResult.SubordinateModifyDN;
+import org.opends.server.backends.pluggable.State.IndexFlag;
import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadOperation;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
@@ -203,7 +204,7 @@
public void run(WriteableTransaction txn) throws Exception
{
//Try creating all the indexes before confirming they are valid ones.
- new AttributeIndex(cfg, EntryContainer.this, txn);
+ new AttributeIndex(cfg, state, EntryContainer.this, txn);
}
});
return true;
@@ -227,7 +228,7 @@
@Override
public void run(WriteableTransaction txn) throws Exception
{
- final AttributeIndex index = new AttributeIndex(cfg, EntryContainer.this, txn);
+ final AttributeIndex index = new AttributeIndex(cfg, state, EntryContainer.this, txn);
index.open(txn);
if (!index.isTrusted())
{
@@ -269,9 +270,7 @@
@Override
public void run(WriteableTransaction txn) throws Exception
{
- AttributeIndex index = attrIndexMap.get(cfg.getAttribute());
- deleteAttributeIndex(txn, index);
- attrIndexMap.remove(cfg.getAttribute());
+ attrIndexMap.remove(cfg.getAttribute()).closeAndDelete(txn);
}
});
}
@@ -411,9 +410,7 @@
@Override
public void run(WriteableTransaction txn) throws Exception
{
- VLVIndex vlvIndex = vlvIndexMap.get(cfg.getName().toLowerCase());
- deleteDatabase(txn, vlvIndex);
- vlvIndexMap.remove(cfg.getName());
+ vlvIndexMap.remove(cfg.getName().toLowerCase()).closeAndDelete(txn);
}
});
}
@@ -500,18 +497,7 @@
state = new State(getIndexName(STATE_DATABASE_NAME));
state.open(txn);
- if (config.isSubordinateIndexesEnabled())
- {
- openSubordinateIndexes(txn);
- }
- else
- {
- // Use a null index and ensure that future attempts to use the real
- // subordinate indexes will fail.
- id2children = openNewNullIndex(txn, ID2CHILDREN_DATABASE_NAME, new ID2CIndexer());
- id2subtree = openNewNullIndex(txn, ID2SUBTREE_DATABASE_NAME, new ID2SIndexer());
- logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, backend.getBackendID());
- }
+ openSubordinateIndexes(txn, config);
dn2uri = new DN2URI(getIndexName(REFERRAL_DATABASE_NAME), this);
dn2uri.open(txn);
@@ -520,7 +506,7 @@
{
BackendIndexCfg indexCfg = config.getBackendIndex(idx);
- AttributeIndex index = new AttributeIndex(indexCfg, this, txn);
+ AttributeIndex index = new AttributeIndex(indexCfg, state, this, txn);
index.open(txn);
if(!index.isTrusted())
{
@@ -552,9 +538,13 @@
}
}
- private NullIndex openNewNullIndex(WriteableTransaction txn, String indexId, Indexer indexer)
+ private NullIndex openNewNullIndex(WriteableTransaction txn, String name)
{
- return new NullIndex(getIndexName(indexId), indexer, state, txn, this);
+ final TreeName treeName = getIndexName(name);
+ final NullIndex index = new NullIndex(treeName);
+ state.removeFlagsFromIndex(txn, treeName, IndexFlag.TRUSTED);
+ txn.deleteTree(treeName);
+ return index;
}
/**
@@ -746,7 +736,7 @@
if (entryID != null)
{
final Index index = subtree ? id2subtree : id2children;
- final EntryIDSet entryIDSet = index.read(txn, entryID.toByteString());
+ final EntryIDSet entryIDSet = index.get(txn, entryID.toByteString());
long count = entryIDSet.size();
if (count != Long.MAX_VALUE)
{
@@ -920,11 +910,11 @@
EntryIDSet scopeSet;
if (searchScope == SearchScope.SINGLE_LEVEL)
{
- scopeSet = id2children.read(txn, baseIDData);
+ scopeSet = id2children.get(txn, baseIDData);
}
else
{
- scopeSet = id2subtree.read(txn, baseIDData);
+ scopeSet = id2subtree.get(txn, baseIDData);
if (searchScope == SearchScope.WHOLE_SUBTREE)
{
// The id2subtree list does not include the base entry ID.
@@ -1543,8 +1533,8 @@
if (parentDN != null)
{
final ByteString parentIDKeyBytes = parentID.toByteString();
- id2children.insertID(indexBuffer, parentIDKeyBytes, entryID);
- id2subtree.insertID(indexBuffer, parentIDKeyBytes, entryID);
+ indexBuffer.put(id2children, parentIDKeyBytes, entryID);
+ indexBuffer.put(id2subtree, parentIDKeyBytes, entryID);
// Iterate up through the superior entries, starting above the
// parent.
@@ -1558,7 +1548,7 @@
}
// Insert into id2subtree for this node.
- id2subtree.insertID(indexBuffer, nodeID.toByteString(), entryID);
+ indexBuffer.put(id2subtree, nodeID.toByteString(), entryID);
}
}
indexBuffer.flush(txn);
@@ -1839,8 +1829,8 @@
// Remove the id2c and id2s records for this entry.
final ByteString leafIDKeyBytes = leafID.toByteString();
- id2children.delete(indexBuffer, leafIDKeyBytes);
- id2subtree.delete(indexBuffer, leafIDKeyBytes);
+ indexBuffer.remove(id2children, leafIDKeyBytes);
+ indexBuffer.remove(id2subtree, leafIDKeyBytes);
// Iterate up through the superior entries from the target entry.
boolean isParent = true;
@@ -1858,10 +1848,10 @@
// Remove from id2children.
if (isParent)
{
- id2children.removeID(indexBuffer, parentIDBytes, leafID);
+ indexBuffer.remove(id2children, parentIDBytes, leafID);
isParent = false;
}
- id2subtree.removeID(indexBuffer, parentIDBytes, leafID);
+ indexBuffer.remove(id2subtree, parentIDBytes, leafID);
}
// Remove the entry from the entry cache.
@@ -2378,10 +2368,10 @@
ByteString parentIDKeyBytes = parentID.toByteString();
if(isParent)
{
- id2children.insertID(buffer, parentIDKeyBytes, newID);
+ buffer.put(id2children, parentIDKeyBytes, newID);
isParent = false;
}
- id2subtree.insertID(buffer, parentIDKeyBytes, newID);
+ buffer.put(id2subtree, parentIDKeyBytes, newID);
}
}
}
@@ -2423,10 +2413,10 @@
ByteString parentIDKeyBytes = parentID.toByteString();
if(isParent)
{
- id2children.removeID(buffer, parentIDKeyBytes, oldID);
+ buffer.remove(id2children, parentIDKeyBytes, oldID);
isParent = false;
}
- id2subtree.removeID(buffer, parentIDKeyBytes, oldID);
+ buffer.remove(id2subtree, parentIDKeyBytes, oldID);
}
}
@@ -2435,8 +2425,8 @@
// All the subordinates will be renumbered so we have to rebuild
// id2c and id2s with the new ID.
ByteString oldIDKeyBytes = oldID.toByteString();
- id2children.delete(buffer, oldIDKeyBytes);
- id2subtree.delete(buffer, oldIDKeyBytes);
+ buffer.remove(id2children, oldIDKeyBytes);
+ buffer.remove(id2subtree, oldIDKeyBytes);
// Reindex the entry with the new ID.
indexRemoveEntry(buffer, oldEntry, oldID);
@@ -2527,7 +2517,7 @@
{
EntryID parentID = dn2id.get(txn, dn);
ByteString parentIDKeyBytes = parentID.toByteString();
- id2subtree.removeID(buffer, parentIDKeyBytes, oldID);
+ buffer.remove(id2subtree, parentIDKeyBytes, oldID);
}
}
@@ -2536,8 +2526,8 @@
// All the subordinates will be renumbered so we have to rebuild
// id2c and id2s with the new ID.
ByteString oldIDKeyBytes = oldID.toByteString();
- id2children.delete(buffer, oldIDKeyBytes);
- id2subtree.delete(buffer, oldIDKeyBytes);
+ buffer.remove(id2children, oldIDKeyBytes);
+ buffer.remove(id2subtree, oldIDKeyBytes);
// Reindex the entry with the new ID.
indexRemoveEntry(buffer, oldEntry, oldID);
@@ -2674,7 +2664,7 @@
final EntryID entryID = dn2id.get(txn, baseDN);
if (entryID != null)
{
- final EntryIDSet entryIDSet = id2subtree.read(txn, entryID.toByteString());
+ final EntryIDSet entryIDSet = id2subtree.get(txn, entryID.toByteString());
long count = entryIDSet.size();
if(count != Long.MAX_VALUE)
{
@@ -2695,31 +2685,6 @@
}
}
-
- /**
- * Get a list of the databases opened by the entryContainer.
- * @param dbList A list of database containers.
- */
- void listDatabases(List<DatabaseContainer> dbList)
- {
- dbList.add(dn2id);
- dbList.add(id2entry);
- dbList.add(dn2uri);
- if (config.isSubordinateIndexesEnabled())
- {
- dbList.add(id2children);
- dbList.add(id2subtree);
- }
- dbList.add(state);
-
- for(AttributeIndex index : attrIndexMap.values())
- {
- index.listDatabases(dbList);
- }
-
- dbList.addAll(vlvIndexMap.values());
- }
-
/**
* Determine whether the provided operation has the ManageDsaIT request
* control.
@@ -2756,10 +2721,7 @@
*/
void delete(WriteableTransaction txn) throws StorageRuntimeException
{
- List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>();
- listDatabases(databases);
-
- for (DatabaseContainer db : databases)
+ for (DatabaseContainer db : listDatabases())
{
db.delete(txn);
}
@@ -2788,24 +2750,6 @@
}
/**
- * Removes a attribute index from disk.
- *
- * @param attributeIndex The attribute index to remove.
- * @throws StorageRuntimeException If an database error occurs while attempting
- * to delete the index.
- */
- private void deleteAttributeIndex(WriteableTransaction txn, AttributeIndex attributeIndex)
- throws StorageRuntimeException
- {
- attributeIndex.close();
- for (Index index : attributeIndex.getAllIndexes())
- {
- index.delete(txn);
- state.deleteRecord(txn, index.getName());
- }
- }
-
- /**
* This method constructs a container name from a base DN. Only alphanumeric
* characters are preserved, all other characters are replaced with an
* underscore.
@@ -2826,8 +2770,7 @@
*/
void setDatabasePrefix(final String newBaseDN) throws StorageRuntimeException
{
- final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>();
- listDatabases(databases);
+ final List<DatabaseContainer> databases = listDatabases();
try
{
// Rename in transaction.
@@ -2940,19 +2883,7 @@
{
if (config.isSubordinateIndexesEnabled() != cfg.isSubordinateIndexesEnabled())
{
- if (cfg.isSubordinateIndexesEnabled())
- {
- // Re-enabling subordinate indexes.
- openSubordinateIndexes(txn);
- }
- else
- {
- // Disabling subordinate indexes. Use a null index and ensure that
- // future attempts to use the real indexes will fail.
- id2children = openNewNullIndex(txn, ID2CHILDREN_DATABASE_NAME, new ID2CIndexer());
- id2subtree = openNewNullIndex(txn, ID2SUBTREE_DATABASE_NAME, new ID2SIndexer());
- logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, cfg.getBackendId());
- }
+ openSubordinateIndexes(txn, cfg);
}
if (config.getIndexEntryLimit() != cfg.getIndexEntryLimit())
@@ -3018,8 +2949,7 @@
private void clear0(WriteableTransaction txn) throws StorageRuntimeException
{
- final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>();
- listDatabases(databases);
+ final List<DatabaseContainer> databases = listDatabases();
try
{
for (DatabaseContainer db : databases)
@@ -3044,6 +2974,28 @@
}
}
+ List<DatabaseContainer> listDatabases()
+ {
+ final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>();
+ databases.add(dn2id);
+ databases.add(id2entry);
+ databases.add(dn2uri);
+ if (config.isSubordinateIndexesEnabled())
+ {
+ databases.add(id2children);
+ databases.add(id2subtree);
+ }
+ databases.add(state);
+
+ for (AttributeIndex index : attrIndexMap.values())
+ {
+ index.listDatabases(databases);
+ }
+
+ databases.addAll(vlvIndexMap.values());
+ return databases;
+ }
+
/**
* Clear the contents for a database from disk.
*
@@ -3092,35 +3044,34 @@
}
/** Opens the id2children and id2subtree indexes. */
- private void openSubordinateIndexes(WriteableTransaction txn)
+ private void openSubordinateIndexes(WriteableTransaction txn, PluggableBackendCfg cfg)
{
- id2children = newIndex(txn, ID2CHILDREN_DATABASE_NAME, new ID2CIndexer());
- id2subtree = newIndex(txn, ID2SUBTREE_DATABASE_NAME, new ID2SIndexer());
- }
-
- private Index newIndex(WriteableTransaction txn, String name, Indexer indexer)
- {
- final Index index = new Index(getIndexName(name), indexer, state, config.getIndexEntryLimit(), 0, true, txn, this);
- index.open(txn);
- if (!index.isTrusted())
+ if (cfg.isSubordinateIndexesEnabled())
{
- logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, index.getName());
- }
- return index;
- }
+ TreeName name = getIndexName(ID2CHILDREN_DATABASE_NAME);
+ id2children = new DefaultIndex(name, state, config.getIndexEntryLimit(), true, txn, this);
+ id2children.open(txn);
+ if (!id2children.isTrusted())
+ {
+ logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, name);
+ }
- /**
- * Creates a new index for an attribute.
- *
- * @param txn a non null database transaction
- * @param indexName the name to give to the new index
- * @param indexer the indexer to use when inserting data into the index
- * @param indexEntryLimit the index entry limit
- * @return a new index
- */
- Index newIndexForAttribute(WriteableTransaction txn, TreeName indexName, Indexer indexer, int indexEntryLimit)
- {
- return new Index(indexName, indexer, state, indexEntryLimit, CURSOR_ENTRY_LIMIT, false, txn, this);
+ name = getIndexName(ID2SUBTREE_DATABASE_NAME);
+ id2subtree = new DefaultIndex(name, state, config.getIndexEntryLimit(), true, txn, this);
+ id2subtree.open(txn);
+ if (!id2subtree.isTrusted())
+ {
+ logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, name);
+ }
+ }
+ else
+ {
+ // Disabling subordinate indexes. Use a null index and ensure that
+ // future attempts to use the real indexes will fail.
+ id2children = openNewNullIndex(txn, ID2CHILDREN_DATABASE_NAME);
+ id2subtree = openNewNullIndex(txn, ID2SUBTREE_DATABASE_NAME);
+ logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, cfg.getBackendId());
+ }
}
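
The hunks above replace the direct Index.insertID/removeID/delete calls with buffered IndexBuffer.put/remove calls, so subordinate index maintenance is staged and applied in one flush. A minimal sketch of the resulting pattern when indexing a newly added entry, assuming an EntryContainer context where id2children, id2subtree and the ancestor IDs are already available (the method name and parameters below are illustrative only, not part of the patch):

  // Illustrative sketch only -- not part of the patch.
  void indexSubordinates(IndexBuffer indexBuffer, EntryID parentID, List<EntryID> ancestorIDs, EntryID entryID)
  {
    // The immediate parent is recorded in both id2children and id2subtree.
    ByteString parentKey = parentID.toByteString();
    indexBuffer.put(id2children, parentKey, entryID);
    indexBuffer.put(id2subtree, parentKey, entryID);

    // Every other ancestor is recorded in id2subtree only.
    for (EntryID ancestorID : ancestorIDs)
    {
      indexBuffer.put(id2subtree, ancestorID.toByteString(), entryID);
    }
    // Nothing is written to the database until indexBuffer.flush(txn) runs.
  }
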
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryIDSet.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryIDSet.java
index a65b061..c9d7924 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryIDSet.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryIDSet.java
@@ -45,7 +45,6 @@
* Represents a set of Entry IDs. It can represent a set where the IDs are not defined, for example when the index entry
* limit has been exceeded.
*/
-@SuppressWarnings("javadoc")
final class EntryIDSet implements Iterable<EntryID>
{
public static final EntryIDSetCodec CODEC_V1 = new EntryIDSetCodecV1();
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2CIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2CIndexer.java
deleted file mode 100644
index fb3a22b..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2CIndexer.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.pluggable;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * Implementation of an Indexer for the children index.
- */
-class ID2CIndexer extends Indexer
-{
- /**
- * Create a new indexer for a children index.
- */
- public ID2CIndexer()
- {
- // No implementation required.
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return "id2children";
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> addKeys, IndexingOptions options)
- {
- // The superior entry IDs are in the entry attachment.
- @SuppressWarnings("unchecked")
- ArrayList<EntryID> ids = (ArrayList<EntryID>) entry.getAttachment();
-
- // Skip the entry's own ID.
- Iterator<EntryID> iter = ids.iterator();
- iter.next();
-
- // Get the parent ID.
- if (iter.hasNext())
- {
- addKeys.add(iter.next().toByteString());
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods,
- Map<ByteString, Boolean> modifiedKeys, IndexingOptions options)
- {
- // Nothing to do.
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2Entry.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2Entry.java
index aec2541..912c64e 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2Entry.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2Entry.java
@@ -57,7 +57,7 @@
* Represents the database containing the LDAP entries. The database key is
* the entry ID and the value is the entry contents.
*/
-class ID2Entry extends DatabaseContainer
+class ID2Entry extends AbstractDatabaseContainer
{
private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2SIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2SIndexer.java
deleted file mode 100644
index ef25d33..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2SIndexer.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.pluggable;
-
-import java.util.*;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * Implementation of an Indexer for the subtree index.
- */
-class ID2SIndexer extends Indexer
-{
- /**
- * Create a new indexer for a subtree index.
- */
- public ID2SIndexer()
- {
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return "id2subtree";
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> addKeys, IndexingOptions options)
- {
- // The superior entry IDs are in the entry attachment.
- @SuppressWarnings("unchecked")
- ArrayList<EntryID> ids = (ArrayList<EntryID>) entry.getAttachment();
-
- // Skip the entry's own ID.
- Iterator<EntryID> iter = ids.iterator();
- iter.next();
-
- // Iterate through the superior IDs.
- while (iter.hasNext())
- {
- ByteString nodeIDData = iter.next().toByteString();
- addKeys.add(nodeIDData);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods,
- Map<ByteString, Boolean> modifiedKeys, IndexingOptions options)
- {
- // Nothing to do.
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java
index 631f825..820e074 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java
@@ -101,6 +101,7 @@
import org.opends.server.backends.RebuildConfig;
import org.opends.server.backends.RebuildConfig.RebuildMode;
import org.opends.server.backends.persistit.PersistItStorage;
+import org.opends.server.backends.pluggable.AttributeIndex.MatchingRuleIndex;
import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
import org.opends.server.backends.pluggable.spi.Storage;
@@ -661,7 +662,7 @@
putInIdContainerMap(attributeIndex.getSubstringIndex());
putInIdContainerMap(attributeIndex.getOrderingIndex());
putInIdContainerMap(attributeIndex.getApproximateIndex());
- Map<String, Collection<Index>> extensibleMap = attributeIndex.getExtensibleIndexes();
+ Map<String, Collection<MatchingRuleIndex>> extensibleMap = attributeIndex.getExtensibleIndexes();
if (!extensibleMap.isEmpty())
{
putInIdContainerMap(extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING));
@@ -670,7 +671,7 @@
}
}
- private void putInIdContainerMap(Collection<Index> indexes)
+ private void putInIdContainerMap(Collection<MatchingRuleIndex> indexes)
{
if (indexes != null)
{
@@ -1527,8 +1528,8 @@
importCount.getAndIncrement();
}
- void processAllIndexes(Suffix suffix, Entry entry, EntryID entryID)
- throws DirectoryException, StorageRuntimeException, InterruptedException
+ void processAllIndexes(Suffix suffix, Entry entry, EntryID entryID) throws StorageRuntimeException,
+ InterruptedException
{
for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix.getAttrIndexMap().entrySet())
{
@@ -1537,7 +1538,7 @@
}
@Override
- void processAttribute(Index index, Entry entry, EntryID entryID, IndexingOptions options,
+ void processAttribute(MatchingRuleIndex index, Entry entry, EntryID entryID, IndexingOptions options,
IndexKey indexKey) throws StorageRuntimeException, InterruptedException
{
if (oldEntry != null)
@@ -1631,7 +1632,6 @@
}
/** Examine the DN for duplicates and missing parents. */
- @SuppressWarnings("javadoc")
boolean dnSanityCheck(DN entryDN, Entry entry, Suffix suffix)
throws StorageRuntimeException, InterruptedException
{
@@ -1662,8 +1662,8 @@
return true;
}
- void processIndexes(Suffix suffix, Entry entry, EntryID entryID)
- throws DirectoryException, StorageRuntimeException, InterruptedException
+ void processIndexes(Suffix suffix, Entry entry, EntryID entryID) throws StorageRuntimeException,
+ InterruptedException
{
for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix.getAttrIndexMap().entrySet())
{
@@ -1676,7 +1676,7 @@
}
void fillIndexKey(AttributeIndex attrIndex, Entry entry, AttributeType attrType, EntryID entryID)
- throws InterruptedException, DirectoryException, StorageRuntimeException
+ throws InterruptedException, StorageRuntimeException
{
final IndexingOptions options = attrIndex.getIndexingOptions();
@@ -1686,12 +1686,12 @@
processAttribute(attrIndex.getOrderingIndex(), ImportIndexType.ORDERING, entry, attrType, entryID, options);
processAttribute(attrIndex.getApproximateIndex(), ImportIndexType.APPROXIMATE, entry, attrType, entryID, options);
- Map<String, Collection<Index>> extensibleMap = attrIndex.getExtensibleIndexes();
+ Map<String, Collection<MatchingRuleIndex>> extensibleMap = attrIndex.getExtensibleIndexes();
if (!extensibleMap.isEmpty())
{
- Collection<Index> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING);
+ Collection<MatchingRuleIndex> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING);
processAttributes(subIndexes, ImportIndexType.EX_SUBSTRING, entry, attrType, entryID, options);
- Collection<Index> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED);
+ Collection<MatchingRuleIndex> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED);
processAttributes(sharedIndexes, ImportIndexType.EX_SHARED, entry, attrType, entryID, options);
}
}
@@ -1707,19 +1707,19 @@
buffer.flush(txn);
}
- private void processAttributes(Collection<Index> indexes, ImportIndexType indexType, Entry entry,
+ private void processAttributes(Collection<MatchingRuleIndex> indexes, ImportIndexType indexType, Entry entry,
AttributeType attributeType, EntryID entryID, IndexingOptions options) throws InterruptedException
{
if (indexes != null)
{
- for (Index index : indexes)
+ for (MatchingRuleIndex index : indexes)
{
processAttribute(index, indexType, entry, attributeType, entryID, options);
}
}
}
- private void processAttribute(Index index, ImportIndexType indexType, Entry entry,
+ private void processAttribute(MatchingRuleIndex index, ImportIndexType indexType, Entry entry,
AttributeType attributeType, EntryID entryID, IndexingOptions options) throws InterruptedException
{
if (index != null)
@@ -1729,7 +1729,7 @@
}
}
- void processAttribute(Index index, Entry entry, EntryID entryID, IndexingOptions options,
+ void processAttribute(MatchingRuleIndex index, Entry entry, EntryID entryID, IndexingOptions options,
IndexKey indexKey) throws StorageRuntimeException, InterruptedException
{
insertKeySet.clear();
@@ -2114,12 +2114,12 @@
if (deleteSet.size() > 0 || !deleteSet.isDefined())
{
final Index index = indexIDToIndexMap.get(indexID);
- index.delete(txn, deleteSet);
+ index.importRemove(txn, deleteSet);
}
if (insertSet.size() > 0 || !insertSet.isDefined())
{
final Index index = indexIDToIndexMap.get(indexID);
- index.insert(txn, insertSet);
+ index.importPut(txn, insertSet);
}
}
}
@@ -2353,7 +2353,7 @@
{
for (ImportIDSet idSet : map.values())
{
- index.insert(txn, idSet);
+ index.importPut(txn, idSet);
}
if (clearMap)
{
@@ -2841,12 +2841,12 @@
private final PluggableBackendCfg cfg;
/** Map of index keys to indexes. */
- private final Map<IndexKey, Index> indexMap =
- new LinkedHashMap<IndexKey, Index>();
+ private final Map<IndexKey, MatchingRuleIndex> indexMap =
+ new LinkedHashMap<IndexKey, MatchingRuleIndex>();
/** Map of index keys to extensible indexes. */
- private final Map<IndexKey, Collection<Index>> extensibleIndexMap =
- new LinkedHashMap<IndexKey, Collection<Index>>();
+ private final Map<IndexKey, Collection<MatchingRuleIndex>> extensibleIndexMap =
+ new LinkedHashMap<IndexKey, Collection<MatchingRuleIndex>>();
/** List of VLV indexes. */
private final List<VLVIndex> vlvIndexes = new LinkedList<VLVIndex>();
@@ -3096,23 +3096,23 @@
fillIndexMap(txn, attrType, attrIndex.getPresenceIndex(), ImportIndexType.PRESENCE, onlyDegraded);
fillIndexMap(txn, attrType, attrIndex.getApproximateIndex(), ImportIndexType.APPROXIMATE, onlyDegraded);
- final Map<String, Collection<Index>> extensibleMap = attrIndex.getExtensibleIndexes();
+ final Map<String, Collection<MatchingRuleIndex>> extensibleMap = attrIndex.getExtensibleIndexes();
if (!extensibleMap.isEmpty())
{
- final Collection<Index> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING);
+ final Collection<MatchingRuleIndex> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING);
fillIndexMap(txn, attrType, subIndexes, ImportIndexType.EX_SUBSTRING, onlyDegraded);
- final Collection<Index> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED);
+ final Collection<MatchingRuleIndex> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED);
fillIndexMap(txn, attrType, sharedIndexes, ImportIndexType.EX_SHARED, onlyDegraded);
}
}
- private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, Collection<Index> indexes,
+ private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, Collection<MatchingRuleIndex> indexes,
ImportIndexType importIndexType, boolean onlyDegraded)
{
if (indexes != null && !indexes.isEmpty())
{
- final List<Index> mutableCopy = new LinkedList<Index>(indexes);
- for (final Iterator<Index> it = mutableCopy.iterator(); it.hasNext();)
+ final List<MatchingRuleIndex> mutableCopy = new LinkedList<MatchingRuleIndex>(indexes);
+ for (final Iterator<MatchingRuleIndex> it = mutableCopy.iterator(); it.hasNext();)
{
final Index index = it.next();
if (!onlyDegraded || !index.isTrusted())
@@ -3135,11 +3135,10 @@
}
}
- private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, Index index,
+ private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, MatchingRuleIndex index,
ImportIndexType importIndexType, boolean onlyDegraded)
{
- if (index != null
- && (!onlyDegraded || !index.isTrusted())
+ if (index != null && (!onlyDegraded || !index.isTrusted())
&& (!rebuildConfig.isClearDegradedState() || index.getRecordCount(txn) == 0))
{
putInIdContainerMap(index);
@@ -3179,7 +3178,7 @@
if (!extensibleIndexMap.isEmpty())
{
- for (final Collection<Index> subIndexes : extensibleIndexMap.values())
+ for (final Collection<MatchingRuleIndex> subIndexes : extensibleIndexMap.values())
{
if (subIndexes != null)
{
@@ -3220,7 +3219,7 @@
}
if (!extensibleIndexMap.isEmpty())
{
- for (Collection<Index> subIndexes : extensibleIndexMap.values())
+ for (Collection<MatchingRuleIndex> subIndexes : extensibleIndexMap.values())
{
setTrusted(txn, subIndexes, trusted);
}
@@ -3232,7 +3231,7 @@
}
}
- private void setTrusted(WriteableTransaction txn, final Collection<Index> indexes, boolean trusted)
+ private void setTrusted(WriteableTransaction txn, final Collection<MatchingRuleIndex> indexes, boolean trusted)
{
if (indexes != null && !indexes.isEmpty())
{
@@ -3493,7 +3492,7 @@
private void processExtensibleIndexes(Entry entry, EntryID entryID)
throws InterruptedException
{
- for (Map.Entry<IndexKey, Collection<Index>> mapEntry :
+ for (Map.Entry<IndexKey, Collection<MatchingRuleIndex>> mapEntry :
this.extensibleIndexMap.entrySet())
{
IndexKey key = mapEntry.getKey();
@@ -3502,7 +3501,7 @@
{
AttributeIndex attributeIndex = entryContainer.getAttributeIndex(attrType);
IndexingOptions options = attributeIndex.getIndexingOptions();
- for (Index index : mapEntry.getValue())
+ for (MatchingRuleIndex index : mapEntry.getValue())
{
processAttribute(index, entry, entryID, options, key);
}
@@ -3513,7 +3512,7 @@
private void processIndexes(Entry entry, EntryID entryID)
throws StorageRuntimeException, InterruptedException
{
- for (Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet())
+ for (Map.Entry<IndexKey, MatchingRuleIndex> mapEntry : indexMap.entrySet())
{
IndexKey key = mapEntry.getKey();
AttributeType attrType = key.getAttributeType();
@@ -3521,7 +3520,7 @@
{
AttributeIndex attributeIndex = entryContainer.getAttributeIndex(attrType);
IndexingOptions options = attributeIndex.getIndexingOptions();
- Index index = mapEntry.getValue();
+ MatchingRuleIndex index = mapEntry.getValue();
processAttribute(index, entry, entryID, options, key);
}
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java
index 7491069..b7de9e4 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java
@@ -26,641 +26,38 @@
*/
package org.opends.server.backends.pluggable;
-import static org.forgerock.util.Reject.*;
-import static org.opends.messages.JebMessages.*;
-import static org.opends.server.backends.pluggable.EntryIDSet.*;
-import static org.opends.server.backends.pluggable.State.IndexFlag.*;
-
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.ldap.ByteSequence;
import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ConditionResult;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.forgerock.util.promise.NeverThrowsException;
-import org.opends.server.backends.pluggable.CursorTransformer.ValueTransformer;
-import org.opends.server.backends.pluggable.EntryIDSet.EntryIDSetCodec;
-import org.opends.server.backends.pluggable.IndexBuffer.BufferedIndexValues;
-import org.opends.server.backends.pluggable.State.IndexFlag;
import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
-import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
-import org.opends.server.backends.pluggable.spi.TreeName;
-import org.opends.server.backends.pluggable.spi.UpdateFunction;
import org.opends.server.backends.pluggable.spi.WriteableTransaction;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-import org.opends.server.util.StaticUtils;
/**
- * Represents an index implemented by a tree in which each key maps to
- * a set of entry IDs. The key is a byte array, and is constructed from some
- * normalized form of an attribute value (or fragment of a value) appearing
- * in the entry.
+ * Represents an index implemented by a tree in which each key maps to a set of entry IDs. The key
+ * is a byte array, and is constructed from some normalized form of an attribute value (or fragment
+ * of a value) appearing in the entry.
*/
-class Index extends DatabaseContainer
+interface Index extends DatabaseContainer
{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
+ EntryIDSet get(ReadableTransaction txn, ByteSequence key);
- /** The indexer object to construct index keys from LDAP attribute values. */
- private Indexer indexer;
+ int getIndexEntryLimit();
- /** The limit on the number of entry IDs that may be indexed by one key. */
- private int indexEntryLimit;
- /**
- * Limit on the number of entry IDs that may be retrieved by cursoring
- * through an index.
- */
- private final int cursorEntryLimit;
- /**
- * Number of keys that have exceeded the entry limit since this
- * object was created.
- */
- private int entryLimitExceededCount;
+ boolean getMaintainCount();
- /**
- * Whether to maintain a count of IDs for a key once the entry limit
- * has exceeded.
- */
- private final boolean maintainCount;
+ // Ignores trusted state.
+ void importPut(WriteableTransaction txn, ImportIDSet idsToBeAdded);
- private final State state;
+ // Ignores trusted state.
+ void importRemove(WriteableTransaction txn, ImportIDSet idsToBeRemoved);
- private final EntryIDSetCodec codec;
+ boolean isTrusted();
- /**
- * A flag to indicate if this index should be trusted to be consistent
- * with the entries database. If not trusted, we assume that existing
- * entryIDSets for a key is still accurate. However, keys that do not
- * exist are undefined instead of an empty entryIDSet. The following
- * rules will be observed when the index is not trusted:
- *
- * - no entryIDs will be added to a non-existing key.
- * - undefined entryIdSet will be returned whenever a key is not found.
- */
- private boolean trusted;
+ Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn);
- /**
- * Create a new index object.
- * @param name The name of the index database within the entryContainer.
- * @param indexer The indexer object to construct index keys from LDAP
- * attribute values.
- * @param state The state database to persist index state info.
- * @param indexEntryLimit The configured limit on the number of entry IDs
- * that may be indexed by one key.
- * @param cursorEntryLimit The configured limit on the number of entry IDs
- * @param maintainCount Whether to maintain a count of IDs for a key once
- * the entry limit has exceeded.
- * @param txn a non null database transaction
- * @param entryContainer The database entryContainer holding this index.
- * @throws StorageRuntimeException If an error occurs in the database.
- */
- Index(TreeName name, Indexer indexer, State state, int indexEntryLimit, int cursorEntryLimit, boolean maintainCount,
- WriteableTransaction txn, EntryContainer entryContainer) throws StorageRuntimeException
- {
- super(name);
- this.indexer = indexer;
- this.indexEntryLimit = indexEntryLimit;
- this.cursorEntryLimit = cursorEntryLimit;
- this.maintainCount = maintainCount;
- this.state = state;
+ boolean setIndexEntryLimit(int indexEntryLimit);
- final EnumSet<IndexFlag> flags = state.getIndexFlags(txn, getName());
- this.codec = flags.contains(COMPACTED) ? CODEC_V2 : CODEC_V1;
- this.trusted = flags.contains(TRUSTED);
- if (!trusted && entryContainer.getHighestEntryID(txn).longValue() == 0)
- {
- // If there are no entries in the entry container then there
- // is no reason why this index can't be upgraded to trusted.
- setTrusted(txn, true);
- }
- }
+ void setTrusted(WriteableTransaction txn, boolean trusted);
- void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options)
- {
- indexer.indexEntry(entry, keys, options);
- }
-
- final void insertID(IndexBuffer buffer, ByteString keyBytes, EntryID entryID)
- {
- getBufferedIndexValues(buffer, keyBytes).addEntryID(keyBytes, entryID);
- }
-
- final Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn) {
- checkNotNull(txn, "txn must not be null");
- return CursorTransformer.transformValues(txn.openCursor(getName()),
- new ValueTransformer<ByteString, ByteString, EntryIDSet, NeverThrowsException>()
- {
- @Override
- public EntryIDSet transform(ByteString key, ByteString value) throws NeverThrowsException
- {
- return codec.decode(key, value);
- }
- });
- }
-
- /**
- * Delete the specified import ID set from the import ID set associated with the key.
- *
- * @param txn a non null database transaction
- * @param importIdSet The import ID set to delete.
- * @throws StorageRuntimeException If a database error occurs.
- */
- final void delete(WriteableTransaction txn, ImportIDSet importIdSet) throws StorageRuntimeException
- {
- ByteSequence key = importIdSet.getKey();
- ByteString value = txn.read(getName(), key);
- if (value != null) {
- final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount);
- importIDSet.remove(importIdSet);
- if (importIDSet.isDefined() && importIDSet.size() == 0)
- {
- txn.delete(getName(), key);
- }
- else
- {
- value = importIDSet.valueToByteString(codec);
- txn.put(getName(), key, value);
- }
- } else {
- // Should never happen -- the keys should always be there.
- throw new RuntimeException();
- }
- }
-
- /**
- * Insert the specified import ID set into this index. Creates a DB cursor if needed.
- *
- * @param txn a non null database transaction
- * @param importIdSet The set of import IDs.
- * @throws StorageRuntimeException If a database error occurs.
- */
- final void insert(WriteableTransaction txn, ImportIDSet importIdSet) throws StorageRuntimeException
- {
- ByteSequence key = importIdSet.getKey();
- ByteString value = txn.read(getName(), key);
- if(value != null) {
- final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount);
- if (importIDSet.merge(importIdSet)) {
- entryLimitExceededCount++;
- }
- value = importIDSet.valueToByteString(codec);
- } else {
- if(!importIdSet.isDefined()) {
- entryLimitExceededCount++;
- }
- value = importIdSet.valueToByteString(codec);
- }
- txn.put(getName(), key, value);
- }
-
- void updateKey(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs)
- throws StorageRuntimeException
- {
- /*
- * Check the special condition where both deletedIDs and addedIDs are null. This is used when
- * deleting entries and corresponding id2children and id2subtree records must be completely
- * removed.
- */
- if (deletedIDs == null && addedIDs == null)
- {
- boolean success = txn.delete(getName(), key);
- if (success && logger.isTraceEnabled())
- {
- StringBuilder builder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
- logger.trace("The expected key does not exist in the index %s.\nKey:%s ", getName(), builder);
- }
- return;
- }
-
- // Handle cases where nothing is changed early to avoid DB access.
- if (isNullOrEmpty(deletedIDs) && isNullOrEmpty(addedIDs))
- {
- return;
- }
-
- if (maintainCount)
- {
- updateKeyWithRMW(txn, key, deletedIDs, addedIDs);
- }
- else
- {
- /*
- * Avoid taking a write lock on a record which has hit all IDs because it is likely to be a
- * point of contention.
- */
- ByteString value = txn.read(getName(), key);
- if (value != null)
- {
- EntryIDSet entryIDSet = codec.decode(key, value);
- if (entryIDSet.isDefined())
- {
- updateKeyWithRMW(txn, key, deletedIDs, addedIDs);
- } // else the record exists but we've hit all IDs.
- }
- else if (trusted)
- {
- /*
- * The key was not present, but we cannot simply add it because another thread may have
- * added since.
- */
- updateKeyWithRMW(txn, key, deletedIDs, addedIDs);
- }
- }
- }
-
- private boolean isNullOrEmpty(EntryIDSet entryIDSet)
- {
- return entryIDSet == null || entryIDSet.size() == 0;
- }
-
- private boolean isNotEmpty(EntryIDSet entryIDSet)
- {
- return entryIDSet != null && entryIDSet.size() > 0;
- }
-
- private void updateKeyWithRMW(final WriteableTransaction txn, final ByteString key, final EntryIDSet deletedIDs,
- final EntryIDSet addedIDs) throws StorageRuntimeException
- {
- txn.update(getName(), key, new UpdateFunction()
- {
- @Override
- public ByteSequence computeNewValue(final ByteSequence oldValue)
- {
- if (oldValue != null)
- {
- EntryIDSet entryIDSet = computeEntryIDSet(key, oldValue.toByteString(), deletedIDs, addedIDs);
- ByteString after = codec.encode(entryIDSet);
- /*
- * If there are no more IDs then return null indicating that the record should be removed.
- * If index is not trusted then this will cause all subsequent reads for this key to
- * return undefined set.
- */
- return after.isEmpty() ? null : after;
- }
- else if (trusted)
- {
- if (deletedIDs != null)
- {
- logIndexCorruptError(txn, key);
- }
- if (isNotEmpty(addedIDs))
- {
- return codec.encode(addedIDs);
- }
- }
- return null; // no change.
- }
- });
- }
-
- private EntryIDSet computeEntryIDSet(ByteString key, ByteString value, EntryIDSet deletedIDs, EntryIDSet addedIDs)
- {
- EntryIDSet entryIDSet = codec.decode(key, value);
- if(addedIDs != null)
- {
- if(entryIDSet.isDefined() && indexEntryLimit > 0)
- {
- long idCountDelta = addedIDs.size();
- if(deletedIDs != null)
- {
- idCountDelta -= deletedIDs.size();
- }
- if(idCountDelta + entryIDSet.size() >= indexEntryLimit)
- {
- if(maintainCount)
- {
- entryIDSet = newUndefinedSetWithSize(key, entryIDSet.size() + idCountDelta);
- }
- else
- {
- entryIDSet = newUndefinedSet();
- }
- entryLimitExceededCount++;
-
- if(logger.isTraceEnabled())
- {
- StringBuilder builder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
- logger.trace("Index entry exceeded in index %s. " +
- "Limit: %d. ID list size: %d.\nKey:%s",
- getName(), indexEntryLimit, idCountDelta + addedIDs.size(), builder);
-
- }
- }
- else
- {
- entryIDSet.addAll(addedIDs);
- if(deletedIDs != null)
- {
- entryIDSet.removeAll(deletedIDs);
- }
- }
- }
- else
- {
- entryIDSet.addAll(addedIDs);
- if(deletedIDs != null)
- {
- entryIDSet.removeAll(deletedIDs);
- }
- }
- }
- else if(deletedIDs != null)
- {
- entryIDSet.removeAll(deletedIDs);
- }
- return entryIDSet;
- }
-
- final void removeID(IndexBuffer buffer, ByteString keyBytes, EntryID entryID)
- {
- getBufferedIndexValues(buffer, keyBytes).deleteEntryID(keyBytes, entryID);
- }
-
- private void logIndexCorruptError(WriteableTransaction txn, ByteString key)
- {
- if (logger.isTraceEnabled())
- {
- StringBuilder builder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
- logger.trace("The expected key does not exist in the index %s.\nKey:%s", getName(), builder);
- }
-
- setTrusted(txn, false);
- logger.error(ERR_JEB_INDEX_CORRUPT_REQUIRES_REBUILD, getName());
- }
-
- void delete(IndexBuffer buffer, ByteString keyBytes)
- {
- getBufferedIndexValues(buffer, keyBytes);
- }
-
- private BufferedIndexValues getBufferedIndexValues(IndexBuffer buffer, ByteString keyBytes)
- {
- return buffer.getBufferedIndexValues(this, keyBytes);
- }
-
- /**
- * Check if an entry ID is in the set of IDs indexed by a given key.
- *
- * @param txn
- * A database transaction.
- * @param key
- * The index key.
- * @param entryID
- * The entry ID.
- * @return true if the entry ID is indexed by the given key, false if it is not indexed by the
- * given key, undefined if the key has exceeded the entry limit.
- * @throws StorageRuntimeException
- * If an error occurs in the database.
- */
- ConditionResult containsID(ReadableTransaction txn, ByteString key, EntryID entryID)
- throws StorageRuntimeException
- {
- ByteString value = txn.read(getName(), key);
- if (value != null)
- {
- EntryIDSet entryIDSet = codec.decode(key, value);
- if (entryIDSet.isDefined())
- {
- return ConditionResult.valueOf(entryIDSet.contains(entryID));
- }
- return ConditionResult.UNDEFINED;
- }
- return trusted ? ConditionResult.FALSE : ConditionResult.UNDEFINED;
- }
-
- /**
- * Reads the value associated to a key.
- *
- * @param txn a non null database transaction
- * @param key The key to read
- * @return The non null set of entry IDs.
- */
- EntryIDSet read(ReadableTransaction txn, ByteSequence key)
- {
- try
- {
- ByteString value = txn.read(getName(), key);
- if (value != null)
- {
- return codec.decode(key, value);
- }
- return trusted ? newDefinedSet() : newUndefinedSet();
- }
- catch (StorageRuntimeException e)
- {
- logger.traceException(e);
- return newUndefinedSet();
- }
- }
-
- /**
- * Reads a range of keys and collects all their entry IDs into a
- * single set.
- *
- * @param txn a non null database transaction
- * @param lower The lower bound of the range. A 0 length byte array indicates
- * no lower bound and the range will start from the
- * smallest key.
- * @param upper The upper bound of the range. A 0 length byte array indicates
- * no upper bound and the range will end at the largest
- * key.
- * @param lowerIncluded true if a key exactly matching the lower bound
- * is included in the range, false if only keys
- * strictly greater than the lower bound are included.
- * This value is ignored if the lower bound is not
- * specified.
- * @param upperIncluded true if a key exactly matching the upper bound
- * is included in the range, false if only keys
- * strictly less than the upper bound are included.
- * This value is ignored if the upper bound is not
- * specified.
- * @return The non null set of entry IDs.
- */
- EntryIDSet readRange(ReadableTransaction txn,
- ByteSequence lower, ByteSequence upper, boolean lowerIncluded, boolean upperIncluded)
- {
- // If this index is not trusted, then just return an undefined id set.
- if (!trusted)
- {
- return newUndefinedSet();
- }
-
- try
- {
- // Total number of IDs found so far.
- int totalIDCount = 0;
-
- ArrayList<EntryIDSet> sets = new ArrayList<EntryIDSet>();
-
- Cursor<ByteString, ByteString> cursor = txn.openCursor(getName());
- try
- {
- boolean success;
- // Set the lower bound if necessary.
- if (lower.length() > 0)
- {
- // Initialize the cursor to the lower bound.
- success = cursor.positionToKeyOrNext(lower);
-
- // Advance past the lower bound if necessary.
- if (success && !lowerIncluded && cursor.getKey().equals(lower))
- {
- // Do not include the lower value.
- success = cursor.next();
- }
- }
- else
- {
- success = cursor.next();
- }
-
- if (!success)
- {
- // There are no values.
- return newDefinedSet();
- }
-
- // Step through the keys until we hit the upper bound or the last key.
- while (success)
- {
- // Check against the upper bound if necessary
- if (upper.length() > 0)
- {
- int cmp = cursor.getKey().compareTo(upper);
- if (cmp > 0 || (cmp == 0 && !upperIncluded))
- {
- break;
- }
- }
-
- EntryIDSet set = codec.decode(cursor.getKey(), cursor.getValue());
- if (!set.isDefined())
- {
- // There is no point continuing.
- return set;
- }
- totalIDCount += set.size();
- if (cursorEntryLimit > 0 && totalIDCount > cursorEntryLimit)
- {
- // There are too many. Give up and return an undefined list.
- return newUndefinedSet();
- }
- sets.add(set);
- success = cursor.next();
- }
-
- return newSetFromUnion(sets);
- }
- finally
- {
- cursor.close();
- }
- }
- catch (StorageRuntimeException e)
- {
- logger.traceException(e);
- return newUndefinedSet();
- }
- }
-
- int getEntryLimitExceededCount()
- {
- return entryLimitExceededCount;
- }
-
- void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options)
- throws StorageRuntimeException
- {
- HashSet<ByteString> addKeys = new HashSet<ByteString>();
- indexer.indexEntry(entry, addKeys, options);
-
- for (ByteString keyBytes : addKeys)
- {
- insertID(buffer, keyBytes, entryID);
- }
- }
-
- void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options)
- throws StorageRuntimeException
- {
- HashSet<ByteString> delKeys = new HashSet<ByteString>();
- indexer.indexEntry(entry, delKeys, options);
-
- for (ByteString keyBytes : delKeys)
- {
- removeID(buffer, keyBytes, entryID);
- }
- }
-
- void modifyEntry(IndexBuffer buffer, EntryID entryID, Entry oldEntry, Entry newEntry, List<Modification> mods,
- IndexingOptions options) throws StorageRuntimeException
- {
- TreeMap<ByteString, Boolean> modifiedKeys = new TreeMap<ByteString, Boolean>();
- indexer.modifyEntry(oldEntry, newEntry, mods, modifiedKeys, options);
-
- for (Map.Entry<ByteString, Boolean> modifiedKey : modifiedKeys.entrySet())
- {
- if(modifiedKey.getValue())
- {
- insertID(buffer, modifiedKey.getKey(), entryID);
- }
- else
- {
- removeID(buffer, modifiedKey.getKey(), entryID);
- }
- }
- }
-
- boolean setIndexEntryLimit(int indexEntryLimit)
- {
- final boolean rebuildRequired = this.indexEntryLimit < indexEntryLimit && entryLimitExceededCount > 0;
- this.indexEntryLimit = indexEntryLimit;
- return rebuildRequired;
- }
-
- final void setIndexer(Indexer indexer)
- {
- this.indexer = indexer;
- }
-
- int getIndexEntryLimit()
- {
- return indexEntryLimit;
- }
-
- synchronized void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException
- {
- this.trusted = trusted;
- if (trusted) {
- state.addFlagsToIndex(txn, getName(), TRUSTED);
- } else {
- state.removeFlagsFromIndex(txn, getName(), TRUSTED);
- }
- }
-
- synchronized boolean isTrusted()
- {
- return trusted;
- }
-
- synchronized boolean isRebuildRunning()
- {
- return false; // FIXME inline?
- }
-
- boolean getMaintainCount()
- {
- return maintainCount;
- }
+ void update(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs);
}
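
Index is now a plain interface exposing only the key-to-EntryIDSet contract; the read/write machinery moves to DefaultIndex and NullIndex. A minimal sketch of how a reader might consume the interface, assuming txn, index and key are supplied by the surrounding backend code:

  // Illustrative sketch only -- not part of the patch.
  EntryIDSet ids = index.get(txn, key);
  if (ids.isDefined())
  {
    for (EntryID entryID : ids)
    {
      // ... resolve each ID against id2entry ...
    }
  }
  else
  {
    // The key exceeded the index entry limit, or the index is untrusted:
    // fall back to an unindexed evaluation of the filter.
  }

Callers that previously relied on Index.readRange now cursor over the index via openCursor, as shown in the IndexQueryFactoryImpl changes below.
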
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexBuffer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexBuffer.java
index c8dbee7..3b3666a 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexBuffer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexBuffer.java
@@ -67,106 +67,74 @@
* state is only ever used when updating the id2children and id2subtree indexes when deleting an
* entry.
*/
- static class BufferedIndexValues
+ private static class BufferedIndexValues
{
- private EntryIDSet addedIDs;
- private EntryIDSet deletedIDs;
+ private EntryIDSet addedEntryIDs;
+ private EntryIDSet deletedEntryIDs;
- /**
- * Adds the provided entryID to this object associating it with the provided keyBytes.
- *
- * @param keyBytes the keyBytes mapping for this entryID
- * @param entryID the entryID to add
- */
- void addEntryID(ByteString keyBytes, EntryID entryID)
+ void addEntryID(EntryID entryID)
{
- if (!remove(deletedIDs, entryID))
+ if (!remove(deletedEntryIDs, entryID))
{
- if (this.addedIDs == null)
+ if (this.addedEntryIDs == null)
{
- this.addedIDs = newDefinedSet();
+ this.addedEntryIDs = newDefinedSet();
}
- this.addedIDs.add(entryID);
+ this.addedEntryIDs.add(entryID);
}
}
- /**
- * Deletes the provided entryID from this object.
- *
- * @param keyBytes the keyBytes mapping for this entryID
- * @param entryID the entryID to delete
- */
- void deleteEntryID(ByteString keyBytes, EntryID entryID)
+ void deleteEntryID(EntryID entryID)
{
- if (!remove(addedIDs, entryID))
+ if (!remove(addedEntryIDs, entryID))
{
- if (this.deletedIDs == null)
+ if (this.deletedEntryIDs == null)
{
- this.deletedIDs = newDefinedSet();
+ this.deletedEntryIDs = newDefinedSet();
}
- this.deletedIDs.add(entryID);
+ this.deletedEntryIDs.add(entryID);
}
}
- private boolean remove(EntryIDSet ids, EntryID entryID)
+ private static boolean remove(EntryIDSet entryIDs, EntryID entryID)
{
- if (ids != null && ids.contains(entryID))
- {
- ids.remove(entryID);
- return true;
- }
- return false;
+ return entryIDs != null ? entryIDs.remove(entryID) : false;
}
}
/** A simple class representing a pair of added and deleted VLV values. */
- static class BufferedVLVIndexValues
+ private static class BufferedVLVIndexValues
{
- private TreeSet<ByteString> addedValues;
- private TreeSet<ByteString> deletedValues;
+ private TreeSet<ByteString> addedSortKeys;
+ private TreeSet<ByteString> deletedSortKeys;
- /**
- * Adds the provided values to this object.
- *
- * @param sortValues the values to add
- */
- void addValues(ByteString sortValues)
+ void addSortKey(ByteString sortKey)
{
- if (!remove(deletedValues, sortValues))
+ if (!remove(deletedSortKeys, sortKey))
{
- if (addedValues == null)
+ if (addedSortKeys == null)
{
- addedValues = new TreeSet<ByteString>();
+ addedSortKeys = new TreeSet<ByteString>();
}
- addedValues.add(sortValues);
+ addedSortKeys.add(sortKey);
}
}
- /**
- * Deletes the provided values from this object.
- *
- * @param sortValues the values to delete
- */
- void deleteValues(ByteString sortValues)
+ void deleteSortKey(ByteString sortKey)
{
- if (!remove(addedValues, sortValues))
+ if (!remove(addedSortKeys, sortKey))
{
- if (deletedValues == null)
+ if (deletedSortKeys == null)
{
- deletedValues = new TreeSet<ByteString>();
+ deletedSortKeys = new TreeSet<ByteString>();
}
- deletedValues.add(sortValues);
+ deletedSortKeys.add(sortKey);
}
}
- private boolean remove(TreeSet<ByteString> values, ByteString sortValues)
+ private static boolean remove(TreeSet<ByteString> sortKeys, ByteString sortKey)
{
- if (values != null && values.contains(sortValues))
- {
- values.remove(sortValues);
- return true;
- }
- return false;
+ return sortKeys != null ? sortKeys.remove(sortKey) : false;
}
}
@@ -181,14 +149,7 @@
this.entryContainer = entryContainer;
}
- /**
- * Get the buffered VLV values for the given VLV index.
- *
- * @param vlvIndex The VLV index with the buffered values to retrieve.
- * @return The buffered VLV values or <code>null</code> if there are
- * no buffered VLV values for the specified VLV index.
- */
- BufferedVLVIndexValues getBufferedVLVIndexValues(VLVIndex vlvIndex)
+ private BufferedVLVIndexValues createOrGetBufferedVLVIndexValues(VLVIndex vlvIndex)
{
BufferedVLVIndexValues bufferedValues = bufferedVLVIndexes.get(vlvIndex);
if (bufferedValues == null)
@@ -199,16 +160,7 @@
return bufferedValues;
}
- /**
- * Get the buffered index values for the given index and keyBytes.
- *
- * @param index
- * The index for which to retrieve the buffered index values
- * @param keyBytes
- * The keyBytes for which to retrieve the buffered index values
- * @return The buffered index values, it can never be null
- */
- BufferedIndexValues getBufferedIndexValues(Index index, ByteString keyBytes)
+ private BufferedIndexValues createOrGetBufferedIndexValues(Index index, ByteString keyBytes)
{
BufferedIndexValues values = null;
@@ -250,7 +202,7 @@
{
for (Index index : attributeIndex.getAllIndexes())
{
- updateKeys(index, txn, bufferedIndexes.remove(index));
+ flushIndex(index, txn, bufferedIndexes.remove(index));
}
}
@@ -259,12 +211,12 @@
BufferedVLVIndexValues bufferedVLVValues = bufferedVLVIndexes.remove(vlvIndex);
if (bufferedVLVValues != null)
{
- vlvIndex.updateIndex(txn, bufferedVLVValues.addedValues, bufferedVLVValues.deletedValues);
+ vlvIndex.updateIndex(txn, bufferedVLVValues.addedSortKeys, bufferedVLVValues.deletedSortKeys);
}
}
final Index id2children = entryContainer.getID2Children();
- updateKeys(id2children, txn, bufferedIndexes.remove(id2children));
+ flushIndex(id2children, txn, bufferedIndexes.remove(id2children));
final Index id2subtree = entryContainer.getID2Subtree();
final TreeMap<ByteString, BufferedIndexValues> bufferedValues = bufferedIndexes.remove(id2subtree);
@@ -275,11 +227,36 @@
* entry processing in add/delete processing. This is necessary in order
* to avoid deadlocks.
*/
- updateKeys(id2subtree, txn, bufferedValues.descendingMap());
+ flushIndex(id2subtree, txn, bufferedValues.descendingMap());
}
}
- private void updateKeys(Index index, WriteableTransaction txn,
+ void put(Index index, ByteString key, EntryID entryID)
+ {
+ createOrGetBufferedIndexValues(index, key).addEntryID(entryID);
+ }
+
+ void put(VLVIndex index, ByteString sortKey)
+ {
+ createOrGetBufferedVLVIndexValues(index).addSortKey(sortKey);
+ }
+
+ void remove(VLVIndex index, ByteString sortKey)
+ {
+ createOrGetBufferedVLVIndexValues(index).deleteSortKey(sortKey);
+ }
+
+ void remove(Index index, ByteString key)
+ {
+ // Staging a key with neither added nor deleted IDs marks the whole record for removal at flush time.
+ createOrGetBufferedIndexValues(index, key);
+ }
+
+ void remove(Index index, ByteString key, EntryID entryID)
+ {
+ createOrGetBufferedIndexValues(index, key).deleteEntryID(entryID);
+ }
+
+ private void flushIndex(Index index, WriteableTransaction txn,
Map<ByteString, BufferedIndexValues> bufferedValues)
{
if (bufferedValues != null)
@@ -290,9 +267,7 @@
final Map.Entry<ByteString, BufferedIndexValues> entry = it.next();
final ByteString key = entry.getKey();
final BufferedIndexValues values = entry.getValue();
-
- index.updateKey(txn, key, values.deletedIDs, values.addedIDs);
-
+ index.update(txn, key, values.deletedEntryIDs, values.addedEntryIDs);
it.remove();
}
}
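
Callers no longer obtain BufferedIndexValues directly; additions and removals are staged through the new put/remove methods and written out by a single flush. A minimal usage sketch, assuming buffer, the indexes, keys, entryID and txn all come from the surrounding add/modify processing (the index and key names are illustrative):

  // Illustrative sketch only -- not part of the patch.
  buffer.put(equalityIndex, newKey, entryID);     // stage an ID addition under a key
  buffer.remove(equalityIndex, oldKey, entryID);  // stage an ID removal under a key
  buffer.remove(id2children, parentKey);          // stage removal of the whole record for a key
  buffer.flush(txn);                              // apply all staged changes in one pass
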
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexFilter.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexFilter.java
index 94f9d6e..d17ed7a 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexFilter.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexFilter.java
@@ -114,7 +114,7 @@
*/
private EntryIDSet evaluateFilter(SearchFilter filter)
{
- EntryIDSet candidates = evaluate(filter);
+ EntryIDSet candidates = evaluateFilter0(filter);
if (buffer != null)
{
candidates.toString(buffer);
@@ -122,7 +122,7 @@
return candidates;
}
- private EntryIDSet evaluate(SearchFilter filter)
+ private EntryIDSet evaluateFilter0(SearchFilter filter)
{
switch (filter.getFilterType())
{
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java
index 4ccfbb6..577e8d6 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java
@@ -29,13 +29,18 @@
import static org.opends.messages.JebMessages.*;
import static org.opends.server.backends.pluggable.EntryIDSet.newUndefinedSet;
+import java.util.ArrayList;
import java.util.Collection;
import org.forgerock.i18n.LocalizableMessageBuilder;
+import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.ldap.ByteSequence;
+import org.forgerock.opendj.ldap.ByteString;
import org.forgerock.opendj.ldap.spi.IndexQueryFactory;
import org.forgerock.opendj.ldap.spi.IndexingOptions;
+import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
+import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
/**
* This class is an implementation of IndexQueryFactory which creates
@@ -43,6 +48,7 @@
*/
final class IndexQueryFactoryImpl implements IndexQueryFactory<IndexQuery>
{
+ private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
private static final String PRESENCE_INDEX_KEY = "presence";
@@ -85,7 +91,7 @@
return createMatchAllQuery().evaluate(debugMessage);
}
- final EntryIDSet entrySet = index.read(txn, key);
+ final EntryIDSet entrySet = index.get(txn, key);
if (debugMessage != null && !entrySet.isDefined())
{
updateStatsUndefinedResults(debugMessage, index);
@@ -103,35 +109,118 @@
/** {@inheritDoc} */
@Override
- public IndexQuery createRangeMatchQuery(final String indexID,
- final ByteSequence lowerBound, final ByteSequence upperBound,
- final boolean includeLowerBound, final boolean includeUpperBound)
+ public IndexQuery createRangeMatchQuery(final String indexID, final ByteSequence lowerBound,
+ final ByteSequence upperBound, final boolean includeLowerBound, final boolean includeUpperBound)
{
return new IndexQuery()
+ {
+ @Override
+ public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
{
- @Override
- public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
+ // Find the right index.
+ final Index index = attributeIndex.getIndexById(indexID);
+ if (index == null)
{
- // Find the right index.
- final Index index = attributeIndex.getIndexById(indexID);
- if (index == null)
+ if (debugMessage != null)
{
- if(debugMessage != null)
- {
- debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_TYPE_DISABLED.get(indexID, ""));
- }
- return createMatchAllQuery().evaluate(debugMessage);
+ debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_TYPE_DISABLED.get(indexID, ""));
}
-
- final EntryIDSet entrySet = index.readRange(txn, lowerBound, upperBound,
- includeLowerBound, includeUpperBound);
- if(debugMessage != null && !entrySet.isDefined())
- {
- updateStatsUndefinedResults(debugMessage, index);
- }
- return entrySet;
+ return createMatchAllQuery().evaluate(debugMessage);
}
+ final EntryIDSet entrySet = readRange(index, txn, lowerBound, upperBound, includeLowerBound, includeUpperBound);
+ if (debugMessage != null && !entrySet.isDefined())
+ {
+ updateStatsUndefinedResults(debugMessage, index);
+ }
+ return entrySet;
+ }
+
+ private final EntryIDSet readRange(Index index, ReadableTransaction txn, ByteSequence lower, ByteSequence upper,
+ boolean lowerIncluded, boolean upperIncluded)
+ {
+ // If this index is not trusted, then just return an undefined id set.
+ if (!index.isTrusted())
+ {
+ return newUndefinedSet();
+ }
+
+ try
+ {
+ // Total number of IDs found so far.
+ int totalIDCount = 0;
+ ArrayList<EntryIDSet> sets = new ArrayList<EntryIDSet>();
+ Cursor<ByteString, EntryIDSet> cursor = index.openCursor(txn);
+ try
+ {
+ boolean success;
+ // Set the lower bound if necessary.
+ if (lower.length() > 0)
+ {
+ // Initialize the cursor to the lower bound.
+ success = cursor.positionToKeyOrNext(lower);
+
+ // Advance past the lower bound if necessary.
+ if (success && !lowerIncluded && cursor.getKey().equals(lower))
+ {
+ // Do not include the lower value.
+ success = cursor.next();
+ }
+ }
+ else
+ {
+ success = cursor.next();
+ }
+
+ if (!success)
+ {
+ // There are no values.
+ return EntryIDSet.newDefinedSet();
+ }
+
+ // Step through the keys until we hit the upper bound or the last key.
+ while (success)
+ {
+ // Check against the upper bound if necessary
+ if (upper.length() > 0)
+ {
+ int cmp = cursor.getKey().compareTo(upper);
+ if (cmp > 0 || (cmp == 0 && !upperIncluded))
+ {
+ break;
+ }
+ }
+
+ EntryIDSet set = cursor.getValue();
+ if (!set.isDefined())
+ {
+ // There is no point continuing.
+ return set;
+ }
+ totalIDCount += set.size();
+ if (totalIDCount > IndexFilter.CURSOR_ENTRY_LIMIT)
+ {
+ // There are too many. Give up and return an undefined list.
+ return newUndefinedSet();
+ }
+ sets.add(set);
+ success = cursor.next();
+ }
+
+ return EntryIDSet.newSetFromUnion(sets);
+ }
+ finally
+ {
+ cursor.close();
+ }
+ }
+ catch (StorageRuntimeException e)
+ {
+ logger.traceException(e);
+ return newUndefinedSet();
+ }
+ }
+
@Override
public String toString()
{
@@ -186,7 +275,7 @@
return newUndefinedSet();
}
- final EntryIDSet entrySet = index.read(txn, PresenceIndexer.presenceKey);
+ final EntryIDSet entrySet = index.get(txn, AttributeIndex.PRESENCE_KEY);
if (debugMessage != null && !entrySet.isDefined())
{
updateStatsUndefinedResults(debugMessage, index);
@@ -208,10 +297,6 @@
{
debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_NOT_TRUSTED.get(index.getName()));
}
- else if (index.isRebuildRunning())
- {
- debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_REBUILD_IN_PROGRESS.get(index.getName()));
- }
else
{
debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_LIMIT_EXCEEDED.get(index.getName()));
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Indexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Indexer.java
deleted file mode 100644
index 71f51ec..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Indexer.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions copyright 2012-2015 ForgeRock AS.
- */
-package org.opends.server.backends.pluggable;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * This class attempts to abstract the generation and comparison of keys
- * for an index. It is subclassed for the specific type of indexing.
- */
-abstract class Indexer
-{
- /**
- * Generate the set of index keys for an entry.
- *
- * @param entry The entry.
- * @param keys The set into which the generated keys will be inserted.
- * @param options The indexing options to use
- */
- public abstract void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options);
-
- /**
- * Generate the set of index keys to be added and the set of index keys
- * to be deleted for an entry that was modified.
- *
- * @param oldEntry The original entry contents.
- * @param newEntry The new entry contents.
- * @param mods The set of modifications that were applied to the entry.
- * @param modifiedKeys The map into which the modified keys will be inserted.
- * @param options The indexing options to use
- */
- public abstract void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods, Map<ByteString, Boolean> modifiedKeys,
- IndexingOptions options);
-
- /**
- * Get a string representation of this object. The returned value is
- * used to name an index created using this object.
- * @return A string representation of this object.
- */
- @Override
- public abstract String toString();
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/NullIndex.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/NullIndex.java
index 8cf26e3..e61486e 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/NullIndex.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/NullIndex.java
@@ -24,154 +24,166 @@
*/
package org.opends.server.backends.pluggable;
-import static org.opends.server.backends.pluggable.EntryIDSet.*;
-
-import java.util.List;
-import java.util.Set;
+import static org.opends.server.backends.pluggable.EntryIDSet.newUndefinedSet;
import org.forgerock.opendj.ldap.ByteSequence;
import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ConditionResult;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.opends.server.backends.pluggable.State.IndexFlag;
+import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
import org.opends.server.backends.pluggable.spi.TreeName;
import org.opends.server.backends.pluggable.spi.WriteableTransaction;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
/**
- * A null index which replaces id2children and id2subtree when they have been
- * disabled.
+ * A null index which replaces id2children and id2subtree when they have been disabled.
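+ * Write operations are no-ops, get() always returns an undefined entry ID set, and the index reports itself as trusted.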
*/
-final class NullIndex extends Index
+final class NullIndex implements Index
{
+ private final TreeName name;
- NullIndex(TreeName name, Indexer indexer, State state, WriteableTransaction txn,
- EntryContainer entryContainer) throws StorageRuntimeException
+ NullIndex(TreeName name)
{
- super(name, indexer, state, 0, 0, false, txn, entryContainer);
- state.removeFlagsFromIndex(txn, name, IndexFlag.TRUSTED);
- super.delete(txn);
+ this.name = name;
}
@Override
- void updateKey(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs)
+ public void update(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs)
throws StorageRuntimeException
{
// Do nothing.
}
@Override
- void delete(IndexBuffer buffer, ByteString keyBytes)
- {
- // Do nothing.
- }
-
- @Override
- ConditionResult containsID(ReadableTransaction txn, ByteString key, EntryID entryID)
- throws StorageRuntimeException
- {
- return ConditionResult.UNDEFINED;
- }
-
- @Override
- EntryIDSet read(ReadableTransaction txn, ByteSequence key)
+ public EntryIDSet get(ReadableTransaction txn, ByteSequence key)
{
return newUndefinedSet();
}
@Override
- EntryIDSet readRange(ReadableTransaction txn, ByteSequence lower, ByteSequence upper, boolean lowerIncluded,
- boolean upperIncluded)
- {
- return newUndefinedSet();
- }
-
- @Override
- int getEntryLimitExceededCount()
- {
- return 0;
- }
-
- @Override
- void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options)
- throws StorageRuntimeException
- {
- // Do nothing.
- }
-
- @Override
- void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options)
- throws StorageRuntimeException
- {
- // Do nothing.
- }
-
- @Override
- void modifyEntry(IndexBuffer buffer, EntryID entryID, Entry oldEntry, Entry newEntry, List<Modification> mods,
- IndexingOptions options) throws StorageRuntimeException
- {
- // Do nothing.
- }
-
- @Override
- boolean setIndexEntryLimit(int indexEntryLimit)
+ public boolean setIndexEntryLimit(int indexEntryLimit)
{
return false;
}
@Override
- int getIndexEntryLimit()
+ public int getIndexEntryLimit()
{
return 0;
}
@Override
- void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException
+ public void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException
{
// Do nothing.
}
@Override
- boolean isTrusted()
+ public boolean isTrusted()
{
return true;
}
@Override
- boolean isRebuildRunning()
+ public boolean getMaintainCount()
{
return false;
}
@Override
- boolean getMaintainCount()
- {
- return false;
- }
-
- @Override
- void open(WriteableTransaction txn) throws StorageRuntimeException
- {
- // Do nothing.
- }
-
- @Override
- long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException
+ public long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException
{
return 0;
}
@Override
- void delete(WriteableTransaction txn) throws StorageRuntimeException
+ public Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn)
+ {
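+ // A null index holds no records, so return a cursor which is always exhausted.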
+ return new Cursor<ByteString, EntryIDSet>()
+ {
+
+ @Override
+ public boolean positionToKey(ByteSequence key)
+ {
+ return false;
+ }
+
+ @Override
+ public boolean positionToKeyOrNext(ByteSequence key)
+ {
+ return false;
+ }
+
+ @Override
+ public boolean positionToLastKey()
+ {
+ return false;
+ }
+
+ @Override
+ public boolean positionToIndex(int index)
+ {
+ return false;
+ }
+
+ @Override
+ public boolean next()
+ {
+ return false;
+ }
+
+ @Override
+ public ByteString getKey()
+ {
+ return null;
+ }
+
+ @Override
+ public EntryIDSet getValue()
+ {
+ return null;
+ }
+
+ @Override
+ public void close()
+ {
+ // Nothing to do.
+ }
+
+ };
+ }
+
+ @Override
+ public void importRemove(WriteableTransaction txn, ImportIDSet idsToBeRemoved) throws StorageRuntimeException
{
// Do nothing.
}
@Override
- void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options)
+ public void importPut(WriteableTransaction txn, ImportIDSet idsToBeAdded) throws StorageRuntimeException
+ {
+ // Do nothing.
+ }
+
+ @Override
+ public TreeName getName()
+ {
+ return name;
+ }
+
+ @Override
+ public void open(WriteableTransaction txn) throws StorageRuntimeException
+ {
+ // Do nothing.
+ }
+
+ @Override
+ public void delete(WriteableTransaction txn) throws StorageRuntimeException
+ {
+ // Do nothing.
+ }
+
+ @Override
+ public void setName(TreeName name)
{
// Do nothing.
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/PresenceIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/PresenceIndexer.java
deleted file mode 100644
index 267a22c..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/PresenceIndexer.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.pluggable;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.opends.server.types.Attribute;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * An implementation of an Indexer for attribute presence.
- */
-class PresenceIndexer extends Indexer
-{
- /** The key bytes used for the presence index. */
- private static final byte[] presenceKeyBytes = "+".getBytes();
-
- /** The key bytes used for the presence index as a {@link ByteString}. */
- static final ByteString presenceKey = ByteString.wrap(presenceKeyBytes);
-
- /** The attribute type for which this instance will generate index keys. */
- private final AttributeType attributeType;
-
- /**
- * Create a new attribute presence indexer.
- * @param attributeType The attribute type for which the indexer
- * is required.
- */
- PresenceIndexer(AttributeType attributeType)
- {
- this.attributeType = attributeType;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return attributeType.getNameOrOID() + ".presence";
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options)
- {
- List<Attribute> attrList = entry.getAttribute(attributeType);
- if (attrList != null && !attrList.isEmpty())
- {
- keys.add(presenceKey);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods,
- Map<ByteString, Boolean> modifiedKeys, IndexingOptions options)
- {
- List<Attribute> newAttributes = newEntry.getAttribute(attributeType, true);
- List<Attribute> oldAttributes = oldEntry.getAttribute(attributeType, true);
- if(oldAttributes == null)
- {
- if(newAttributes != null)
- {
- modifiedKeys.put(presenceKey, true);
- }
- }
- else
- {
- if(newAttributes == null)
- {
- modifiedKeys.put(presenceKey, false);
- }
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/RootContainer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/RootContainer.java
index fa8429f..bf86719 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/RootContainer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/RootContainer.java
@@ -503,13 +503,13 @@
if (timeLimit > 0)
{
// Get a list of all the databases used by the backend.
- ArrayList<DatabaseContainer> dbList = new ArrayList<DatabaseContainer>();
+ final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>();
for (EntryContainer ec : entryContainers.values())
{
ec.sharedLock.lock();
try
{
- ec.listDatabases(dbList);
+ databases.addAll(ec.listDatabases());
}
finally
{
@@ -518,7 +518,7 @@
}
// Sort the list in order of priority.
- Collections.sort(dbList, new DbPreloadComparator());
+ Collections.sort(databases, new DbPreloadComparator());
// Preload each database until we reach the time limit or the cache
// is filled.
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/State.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/State.java
index caac24e..4cd3f05 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/State.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/State.java
@@ -46,7 +46,7 @@
* This class is responsible for storing the configuration state of
* the JE backend for a particular suffix.
*/
-class State extends DatabaseContainer
+class State extends AbstractDatabaseContainer
{
/**
@@ -165,7 +165,6 @@
* Ensure that the specified flags are not set for the given index
* @param txn a non null database transaction
* @param index The index storing the trusted state info.
- * @return The flags of the index
* @throws NullPointerException if txn, index or flags is null
* @throws StorageRuntimeException If an error occurs in the database.
*/
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Suffix.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Suffix.java
index 250b92c..9f94194 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Suffix.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Suffix.java
@@ -34,6 +34,7 @@
import java.util.concurrent.CountDownLatch;
import org.forgerock.i18n.slf4j.LocalizedLogger;
+import org.opends.server.backends.pluggable.AttributeIndex.MatchingRuleIndex;
import org.opends.server.backends.pluggable.Importer.DNCache;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
@@ -267,7 +268,7 @@
setTrusted(txn, attributeIndex.getSubstringIndex(), trusted);
setTrusted(txn, attributeIndex.getOrderingIndex(), trusted);
setTrusted(txn, attributeIndex.getApproximateIndex(), trusted);
- Map<String, Collection<Index>> exIndexes = attributeIndex.getExtensibleIndexes();
+ Map<String, Collection<MatchingRuleIndex>> exIndexes = attributeIndex.getExtensibleIndexes();
if(!exIndexes.isEmpty())
{
setTrusted(txn, exIndexes.get(EXTENSIBLE_INDEXER_ID_SUBSTRING), trusted);
@@ -287,7 +288,7 @@
}
}
- private void setTrusted(WriteableTransaction txn, Collection<Index> indexes, boolean trusted)
+ private void setTrusted(WriteableTransaction txn, Collection<MatchingRuleIndex> indexes, boolean trusted)
{
if (indexes != null)
{
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VLVIndex.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VLVIndex.java
index b8f037c..7014e6e 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VLVIndex.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VLVIndex.java
@@ -87,7 +87,7 @@
* "tie-breaker" and ensures that keys correspond to one and only one entry. This ensures that all
* database updates can be performed using lock-free operations.
*/
-class VLVIndex extends DatabaseContainer implements ConfigurationChangeListener<BackendVLVIndexCfg>, Closeable
+class VLVIndex extends AbstractDatabaseContainer implements ConfigurationChangeListener<BackendVLVIndexCfg>, Closeable
{
private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
@@ -166,9 +166,8 @@
}
@Override
- void open(final WriteableTransaction txn) throws StorageRuntimeException
+ void doOpen(final WriteableTransaction txn) throws StorageRuntimeException
{
- super.open(txn);
count.set((int) txn.getRecordCount(getName()));
}
@@ -352,7 +351,7 @@
{
if (shouldInclude(entry))
{
- buffer.getBufferedVLVIndexValues(this).addValues(encodeVLVKey(entry, entryID.longValue()));
+ buffer.put(this, encodeVLVKey(entry, entryID.longValue()));
}
}
@@ -418,7 +417,7 @@
{
if (shouldInclude(entry))
{
- buffer.getBufferedVLVIndexValues(this).deleteValues(encodeVLVKey(entry, entryID.longValue()));
+ buffer.remove(this, encodeVLVKey(entry, entryID.longValue()));
}
}
@@ -862,4 +861,11 @@
}
builder.append(separator);
}
+
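+ /** Closes this VLV index, deletes its tree and removes its record from the state database. */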
+ void closeAndDelete(WriteableTransaction txn)
+ {
+ close();
+ delete(txn);
+ state.deleteRecord(txn, getName());
+ }
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VerifyJob.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VerifyJob.java
index be7fc27..b97484c 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VerifyJob.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/VerifyJob.java
@@ -54,6 +54,7 @@
import org.forgerock.opendj.ldap.schema.MatchingRule;
import org.forgerock.opendj.ldap.spi.IndexingOptions;
import org.opends.server.backends.VerifyConfig;
+import org.opends.server.backends.pluggable.AttributeIndex.MatchingRuleIndex;
import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadOperation;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
@@ -922,7 +923,7 @@
* @param index The index database to be checked.
* @throws StorageRuntimeException If an error occurs in the database.
*/
- private void iterateAttrIndex(ReadableTransaction txn, Index index, IndexingOptions options)
+ private void iterateAttrIndex(ReadableTransaction txn, MatchingRuleIndex index, IndexingOptions options)
throws StorageRuntimeException
{
if (index == null)
@@ -1183,7 +1184,7 @@
{
try
{
- ConditionResult cr = id2c.containsID(txn, parentID.toByteString(), entryID);
+ ConditionResult cr = indexContainsID(id2c, txn, parentID.toByteString(), entryID);
if (cr == ConditionResult.FALSE)
{
if (logger.isTraceEnabled())
@@ -1247,7 +1248,7 @@
{
try
{
- ConditionResult cr = id2s.containsID(txn, id.toByteString(), entryID);
+ ConditionResult cr = indexContainsID(id2s, txn, id.toByteString(), entryID);
if (cr == ConditionResult.FALSE)
{
if (logger.isTraceEnabled())
@@ -1389,7 +1390,7 @@
if (presenceIndex != null)
{
- verifyAttributeInIndex(presenceIndex, txn, PresenceIndexer.presenceKey, entryID);
+ verifyAttributeInIndex(presenceIndex, txn, AttributeIndex.PRESENCE_KEY, entryID);
}
for (Attribute attr : attrList)
@@ -1433,7 +1434,7 @@
{
try
{
- ConditionResult cr = index.containsID(txn, key, entryID);
+ ConditionResult cr = indexContainsID(index, txn, key, entryID);
if (cr == ConditionResult.FALSE)
{
if (logger.isTraceEnabled())
@@ -1459,6 +1460,16 @@
}
}
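+ /** Replaces the former Index.containsID(): the result is undefined when the key's entry ID set is undefined. */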
+ private ConditionResult indexContainsID(Index index, ReadableTransaction txn, ByteString key, EntryID entryID)
+ {
+ EntryIDSet entryIDSet = index.get(txn, key);
+ if (entryIDSet.isDefined())
+ {
+ return ConditionResult.valueOf(entryIDSet.contains(entryID));
+ }
+ return ConditionResult.UNDEFINED;
+ }
+
private ByteString normalize(MatchingRule matchingRule, ByteString value) throws DirectoryException
{
try
--
Gitblit v1.10.0