| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Copyright 2006-2008 Sun Microsystems, Inc. |
| | | * Portions Copyright 2011-2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | |
| | | /** |
| | | * This class is a wrapper around the tree object and provides basic |
| | | * read and write methods for entries. |
| | | */ |
| | | abstract class AbstractDatabaseContainer implements DatabaseContainer |
| | | { |
| | | /** The name of the database within the entryContainer. */ |
| | | private TreeName name; |
| | | |
| | | AbstractDatabaseContainer(final TreeName name) |
| | | { |
| | | this.name = name; |
| | | } |
| | | |
| | | @Override |
| | | public final void open(WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | txn.openTree(name); |
| | | doOpen(txn); |
| | | } |
| | | |
| | | /** |
| | | * Override in order to perform any additional initialization after the index has opened. |
| | | */ |
| | | void doOpen(WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | // Do nothing by default. |
| | | } |
| | | |
| | | @Override |
| | | public final void delete(WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | txn.deleteTree(name); |
| | | } |
| | | |
| | | @Override |
| | | public final long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | return txn.getRecordCount(name); |
| | | } |
| | | |
| | | @Override |
| | | public final TreeName getName() |
| | | { |
| | | return name; |
| | | } |
| | | |
| | | @Override |
| | | public final void setName(TreeName name) |
| | | { |
| | | this.name = name; |
| | | } |
| | | |
| | | @Override |
| | | public final String toString() |
| | | { |
| | | return name.toString(); |
| | | } |
| | | } |
| | |
| | | import org.forgerock.opendj.config.server.ConfigChangeResult; |
| | | import org.forgerock.opendj.config.server.ConfigException; |
| | | import org.forgerock.opendj.ldap.Assertion; |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.DecodeException; |
| | | import org.forgerock.opendj.ldap.schema.MatchingRule; |
| | | import org.forgerock.opendj.ldap.schema.Schema; |
| | | import org.forgerock.opendj.ldap.spi.IndexQueryFactory; |
| | | import org.forgerock.opendj.ldap.spi.Indexer; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.opends.server.admin.server.ConfigurationChangeListener; |
| | | import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType; |
| | |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * This class implements an attribute indexer for matching rules in JE Backend. |
| | | */ |
| | | final class MatchingRuleIndex extends DefaultIndex |
| | | { |
| | | /** |
| | | * The matching rule's indexer. |
| | | */ |
| | | private final Indexer indexer; |
| | | |
| | | MatchingRuleIndex(WriteableTransaction txn, BackendIndexCfg cfg, Indexer indexer) |
| | | { |
| | | super(getIndexName(attributeType, indexer.getIndexID()), state, cfg.getIndexEntryLimit(), false, txn, |
| | | entryContainer); |
| | | this.indexer = indexer; |
| | | } |
| | | |
| | | void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options) |
| | | { |
| | | List<Attribute> attributes = entry.getAttribute(attributeType, true); |
| | | if (attributes != null) |
| | | { |
| | | indexAttribute(attributes, keys, options); |
| | | } |
| | | } |
| | | |
| | | void modifyEntry(Entry oldEntry, Entry newEntry, List<Modification> mods, Map<ByteString, Boolean> modifiedKeys, |
| | | IndexingOptions options) |
| | | { |
| | | List<Attribute> oldAttributes = oldEntry.getAttribute(attributeType, true); |
| | | if (oldAttributes != null) |
| | | { |
| | | final Set<ByteString> keys = new HashSet<ByteString>(); |
| | | indexAttribute(oldAttributes, keys, options); |
| | | for (ByteString key : keys) |
| | | { |
| | | modifiedKeys.put(key, false); |
| | | } |
| | | } |
| | | |
| | | List<Attribute> newAttributes = newEntry.getAttribute(attributeType, true); |
| | | if (newAttributes != null) |
| | | { |
| | | final Set<ByteString> keys = new HashSet<ByteString>(); |
| | | indexAttribute(newAttributes, keys, options); |
| | | for (ByteString key : keys) |
| | | { |
| | | final Boolean needsAdding = modifiedKeys.get(key); |
| | | if (needsAdding == null) |
| | | { |
| | | // This value has been added. |
| | | modifiedKeys.put(key, true); |
| | | } |
| | | else if (!needsAdding) |
| | | { |
| | | // This value has not been added or removed. |
| | | modifiedKeys.remove(key); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | private void indexAttribute(List<Attribute> attributes, Set<ByteString> keys, IndexingOptions options) |
| | | { |
| | | for (Attribute attr : attributes) |
| | | { |
| | | if (!attr.isVirtual()) |
| | | { |
| | | for (ByteString value : attr) |
| | | { |
| | | try |
| | | { |
| | | indexer.createKeys(Schema.getDefaultSchema(), value, options, keys); |
| | | |
| | | /* |
| | | * Optimization for presence: return immediately after first value since all values |
| | | * have the same key. |
| | | */ |
| | | if (indexer == PRESENCE_INDEXER) |
| | | { |
| | | return; |
| | | } |
| | | } |
| | | catch (DecodeException e) |
| | | { |
| | | logger.traceException(e); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | /** The key bytes used for the presence index as a {@link ByteString}. */ |
| | | static final ByteString PRESENCE_KEY = ByteString.valueOf("+"); |
| | | |
| | | /** |
| | | * A special indexer for generating presence indexes. |
| | | */ |
| | | private static final Indexer PRESENCE_INDEXER = new Indexer() |
| | | { |
| | | @Override |
| | | public void createKeys(Schema schema, ByteSequence value, IndexingOptions options, Collection<ByteString> keys) |
| | | throws DecodeException |
| | | { |
| | | keys.add(PRESENCE_KEY); |
| | | } |
| | | |
| | | @Override |
| | | public String getIndexID() |
| | | { |
| | | return IndexType.PRESENCE.toString(); |
| | | } |
| | | }; |
| | | |
| | | /* |
| | | * FIXME Matthew Swift: Once the matching rules have been migrated we should |
| | | * revisit this class. All of the evaluateXXX methods should go (the Matcher |
| | |
| | | private final EntryContainer entryContainer; |
| | | |
| | | /** The attribute index configuration. */ |
| | | private BackendIndexCfg indexConfig; |
| | | private BackendIndexCfg config; |
| | | |
| | | /** The mapping from names to indexes. */ |
| | | private final Map<String, Index> nameToIndexes = new HashMap<String, Index>(); |
| | | private final Map<String, MatchingRuleIndex> nameToIndexes = new HashMap<String, MatchingRuleIndex>(); |
| | | private final IndexingOptions indexingOptions; |
| | | private final State state; |
| | | |
| | | /** The attribute type for which this instance will generate index keys. */ |
| | | private final AttributeType attributeType; |
| | | |
| | | /** |
| | | * The mapping from extensible index types (e.g. "substring" or "shared") to list of indexes. |
| | | */ |
| | | private Map<String, Collection<Index>> extensibleIndexesMapping; |
| | | private Map<String, Collection<MatchingRuleIndex>> extensibleIndexesMapping; |
| | | |
| | | /** |
| | | * Create a new attribute index object. |
| | | * |
| | | * @param indexConfig The attribute index configuration. |
| | | * @param entryContainer The entryContainer of this attribute index. |
| | | * @param txn a non null database transaction |
| | | * @throws ConfigException if a configuration related error occurs. |
| | | */ |
| | | AttributeIndex(BackendIndexCfg indexConfig, EntryContainer entryContainer, WriteableTransaction txn) |
| | | AttributeIndex(BackendIndexCfg config, State state, EntryContainer entryContainer, WriteableTransaction txn) |
| | | throws ConfigException |
| | | { |
| | | this.entryContainer = entryContainer; |
| | | this.indexConfig = indexConfig; |
| | | this.config = config; |
| | | this.state = state; |
| | | this.attributeType = config.getAttribute(); |
| | | |
| | | buildPresenceIndex(txn); |
| | | buildIndexes(txn, IndexType.EQUALITY); |
| | |
| | | buildIndexes(txn, IndexType.APPROXIMATE); |
| | | buildExtensibleIndexes(txn); |
| | | |
| | | indexingOptions = new JEIndexConfig(indexConfig.getSubstringLength()); |
| | | indexingOptions = new IndexingOptionsImpl(config.getSubstringLength()); |
| | | extensibleIndexesMapping = computeExtensibleIndexesMapping(); |
| | | } |
| | | |
| | | private void buildPresenceIndex(WriteableTransaction txn) |
| | | { |
| | | final IndexType indexType = IndexType.PRESENCE; |
| | | if (indexConfig.getIndexType().contains(indexType)) |
| | | if (config.getIndexType().contains(indexType)) |
| | | { |
| | | String indexID = indexType.toString(); |
| | | nameToIndexes.put(indexID, newPresenceIndex(txn, indexConfig)); |
| | | nameToIndexes.put(indexID, new MatchingRuleIndex(txn, config, PRESENCE_INDEXER)); |
| | | } |
| | | } |
| | | |
| | | private Index newPresenceIndex(WriteableTransaction txn, BackendIndexCfg cfg) |
| | | { |
| | | final AttributeType attrType = cfg.getAttribute(); |
| | | final TreeName indexName = getIndexName(attrType, IndexType.PRESENCE.toString()); |
| | | final PresenceIndexer indexer = new PresenceIndexer(attrType); |
| | | return entryContainer.newIndexForAttribute(txn, indexName, indexer, cfg.getIndexEntryLimit()); |
| | | } |
| | | |
| | | private void buildExtensibleIndexes(WriteableTransaction txn) throws ConfigException |
| | | { |
| | | final IndexType indexType = IndexType.EXTENSIBLE; |
| | | if (indexConfig.getIndexType().contains(indexType)) |
| | | if (config.getIndexType().contains(indexType)) |
| | | { |
| | | final AttributeType attrType = indexConfig.getAttribute(); |
| | | Set<String> extensibleRules = indexConfig.getIndexExtensibleMatchingRule(); |
| | | final AttributeType attrType = config.getAttribute(); |
| | | Set<String> extensibleRules = config.getIndexExtensibleMatchingRule(); |
| | | if (extensibleRules == null || extensibleRules.isEmpty()) |
| | | { |
| | | throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attrType, indexType.toString())); |
| | |
| | | logger.error(ERR_CONFIG_INDEX_TYPE_NEEDS_VALID_MATCHING_RULE, attrType, ruleName); |
| | | continue; |
| | | } |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers()) |
| | | for (Indexer indexer : rule.getIndexers()) |
| | | { |
| | | final String indexId = indexer.getIndexID(); |
| | | if (!nameToIndexes.containsKey(indexId)) |
| | | { |
| | | // There is no index available for this index id. Create a new index |
| | | nameToIndexes.put(indexId, newAttributeIndex(txn, indexConfig, indexer)); |
| | | nameToIndexes.put(indexId, new MatchingRuleIndex(txn, config, indexer)); |
| | | } |
| | | } |
| | | } |
| | |
| | | |
| | | private void buildIndexes(WriteableTransaction txn, IndexType indexType) throws ConfigException |
| | | { |
| | | if (indexConfig.getIndexType().contains(indexType)) |
| | | if (config.getIndexType().contains(indexType)) |
| | | { |
| | | final AttributeType attrType = indexConfig.getAttribute(); |
| | | final AttributeType attrType = config.getAttribute(); |
| | | final String indexID = indexType.toString(); |
| | | final MatchingRule rule = getMatchingRule(indexType, attrType); |
| | | if (rule == null) |
| | |
| | | throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attrType, indexID)); |
| | | } |
| | | |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers()) |
| | | for (Indexer indexer : rule.getIndexers()) |
| | | { |
| | | nameToIndexes.put(indexID, newAttributeIndex(txn, indexConfig, indexer)); |
| | | nameToIndexes.put(indexID, new MatchingRuleIndex(txn, config, indexer)); |
| | | } |
| | | } |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | private Index newAttributeIndex(WriteableTransaction txn, BackendIndexCfg indexConfig, |
| | | org.forgerock.opendj.ldap.spi.Indexer indexer) |
| | | { |
| | | final AttributeType attrType = indexConfig.getAttribute(); |
| | | final TreeName indexName = getIndexName(attrType, indexer.getIndexID()); |
| | | final AttributeIndexer attrIndexer = new AttributeIndexer(attrType, indexer); |
| | | return entryContainer.newIndexForAttribute(txn, indexName, attrIndexer, indexConfig.getIndexEntryLimit()); |
| | | } |
| | | |
| | | private TreeName getIndexName(AttributeType attrType, String indexID) |
| | | { |
| | | final String attrIndexId = attrType.getNameOrOID() + "." + indexID; |
| | |
| | | { |
| | | index.open(txn); |
| | | } |
| | | indexConfig.addChangeListener(this); |
| | | config.addChangeListener(this); |
| | | } |
| | | |
| | | @Override |
| | | public void close() |
| | | { |
| | | indexConfig.removeChangeListener(this); |
| | | config.removeChangeListener(this); |
| | | } |
| | | |
| | | /** |
| | |
| | | */ |
| | | AttributeType getAttributeType() |
| | | { |
| | | return indexConfig.getAttribute(); |
| | | return config.getAttribute(); |
| | | } |
| | | |
| | | /** |
| | |
| | | */ |
| | | BackendIndexCfg getConfiguration() |
| | | { |
| | | return indexConfig; |
| | | return config; |
| | | } |
| | | |
| | | /** |
| | |
| | | void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry) |
| | | throws StorageRuntimeException, DirectoryException |
| | | { |
| | | for (Index index : nameToIndexes.values()) |
| | | for (MatchingRuleIndex index : nameToIndexes.values()) |
| | | { |
| | | index.addEntry(buffer, entryID, entry, indexingOptions); |
| | | HashSet<ByteString> keys = new HashSet<ByteString>(); |
| | | index.indexEntry(entry, keys, indexingOptions); |
| | | for (ByteString key : keys) |
| | | { |
| | | buffer.put(index, key, entryID); |
| | | } |
| | | } |
| | | } |
| | | |
| | |
| | | void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry) |
| | | throws StorageRuntimeException, DirectoryException |
| | | { |
| | | for (Index index : nameToIndexes.values()) |
| | | for (MatchingRuleIndex index : nameToIndexes.values()) |
| | | { |
| | | index.removeEntry(buffer, entryID, entry, indexingOptions); |
| | | HashSet<ByteString> keys = new HashSet<ByteString>(); |
| | | index.indexEntry(entry, keys, indexingOptions); |
| | | for (ByteString key : keys) |
| | | { |
| | | buffer.remove(index, key, entryID); |
| | | } |
| | | } |
| | | } |
| | | |
| | |
| | | List<Modification> mods) |
| | | throws StorageRuntimeException |
| | | { |
| | | for (Index index : nameToIndexes.values()) |
| | | for (MatchingRuleIndex index : nameToIndexes.values()) |
| | | { |
| | | index.modifyEntry(buffer, entryID, oldEntry, newEntry, mods, indexingOptions); |
| | | TreeMap<ByteString, Boolean> modifiedKeys = new TreeMap<ByteString, Boolean>(); |
| | | index.modifyEntry(oldEntry, newEntry, mods, modifiedKeys, indexingOptions); |
| | | for (Map.Entry<ByteString, Boolean> modifiedKey : modifiedKeys.entrySet()) |
| | | { |
| | | if (modifiedKey.getValue()) |
| | | { |
| | | buffer.put(index, modifiedKey.getKey(), entryID); |
| | | } |
| | | else |
| | | { |
| | | buffer.remove(index, modifiedKey.getKey(), entryID); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | |
| | | // concurrent writers. |
| | | Set<ByteString> set = new HashSet<ByteString>(); |
| | | |
| | | int substrLength = indexConfig.getSubstringLength(); |
| | | int substrLength = config.getSubstringLength(); |
| | | |
| | | // Example: The value is ABCDE and the substring length is 3. |
| | | // We produce the keys ABC BCD CDE DE E |
| | |
| | | |
| | | if (debugBuffer != null) |
| | | { |
| | | debugBuffer.append("[INDEX:").append(indexConfig.getAttribute().getNameOrOID()) |
| | | debugBuffer.append("[INDEX:").append(config.getAttribute().getNameOrOID()) |
| | | .append(".").append(indexName).append("]"); |
| | | } |
| | | |
| | |
| | | } |
| | | |
| | | /** |
| | | * Return the number of values that have exceeded the entry limit since this |
| | | * object was created. |
| | | * |
| | | * @return The number of values that have exceeded the entry limit. |
| | | */ |
| | | long getEntryLimitExceededCount() |
| | | { |
| | | long entryLimitExceededCount = 0; |
| | | |
| | | for (Index index : nameToIndexes.values()) |
| | | { |
| | | entryLimitExceededCount += index.getEntryLimitExceededCount(); |
| | | } |
| | | return entryLimitExceededCount; |
| | | } |
| | | |
| | | /** |
| | | * Get a list of the databases opened by this attribute index. |
| | | * @param dbList A list of database containers. |
| | | */ |
| | |
| | | }); |
| | | |
| | | extensibleIndexesMapping = computeExtensibleIndexesMapping(); |
| | | indexConfig = cfg; |
| | | config = cfg; |
| | | } |
| | | catch(Exception e) |
| | | { |
| | |
| | | continue; |
| | | } |
| | | validRules.add(rule); |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers()) |
| | | for (Indexer indexer : rule.getIndexers()) |
| | | { |
| | | String indexId = indexer.getIndexID(); |
| | | validIndexIds.add(indexId); |
| | | if (!nameToIndexes.containsKey(indexId)) |
| | | { |
| | | Index index = newAttributeIndex(txn, cfg, indexer); |
| | | MatchingRuleIndex index = new MatchingRuleIndex(txn, cfg, indexer); |
| | | openIndex(txn, index, ccr); |
| | | nameToIndexes.put(indexId, index); |
| | | } |
| | |
| | | ccr.setAdminActionRequired(true); |
| | | ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(index.getName())); |
| | | } |
| | | if (indexConfig.getSubstringLength() != cfg.getSubstringLength()) |
| | | { |
| | | index.setIndexer(new AttributeIndexer(attrType, indexer)); |
| | | } |
| | | } |
| | | } |
| | | } |
| | |
| | | for (MatchingRule rule: rulesToDelete) |
| | | { |
| | | final List<String> indexIdsToRemove = new ArrayList<String>(); |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers()) |
| | | for (Indexer indexer : rule.getIndexers()) |
| | | { |
| | | final String indexId = indexer.getIndexID(); |
| | | if (!validIndexIds.contains(indexId)) |
| | |
| | | private Set<MatchingRule> getCurrentExtensibleMatchingRules() |
| | | { |
| | | final Set<MatchingRule> rules = new HashSet<MatchingRule>(); |
| | | for (String ruleName : indexConfig.getIndexExtensibleMatchingRule()) |
| | | for (String ruleName : config.getIndexExtensibleMatchingRule()) |
| | | { |
| | | final MatchingRule rule = DirectoryServer.getMatchingRule(toLowerCase(ruleName)); |
| | | if (rule != null) |
| | |
| | | final ConfigChangeResult ccr) |
| | | { |
| | | String indexId = indexType.toString(); |
| | | Index index = nameToIndexes.get(indexId); |
| | | MatchingRuleIndex index = nameToIndexes.get(indexId); |
| | | if (!cfg.getIndexType().contains(indexType)) |
| | | { |
| | | removeIndex(txn, index, indexType); |
| | |
| | | if (index == null) |
| | | { |
| | | final MatchingRule matchingRule = getMatchingRule(indexType, cfg.getAttribute()); |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : matchingRule.getIndexers()) |
| | | for (Indexer indexer : matchingRule.getIndexers()) |
| | | { |
| | | index = newAttributeIndex(txn, cfg, indexer); |
| | | index = new MatchingRuleIndex(txn, cfg, indexer); |
| | | openIndex(txn, index, ccr); |
| | | nameToIndexes.put(indexId, index); |
| | | } |
| | |
| | | ccr.setAdminActionRequired(true); |
| | | ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(index.getName())); |
| | | } |
| | | if (indexType == IndexType.SUBSTRING && config.getSubstringLength() != cfg.getSubstringLength()) |
| | | { |
| | | ccr.setAdminActionRequired(true); |
| | | // FIXME: msg? |
| | | ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(index.getName())); |
| | | } |
| | | } |
| | | } |
| | | |
| | |
| | | { |
| | | final IndexType indexType = IndexType.PRESENCE; |
| | | final String indexID = indexType.toString(); |
| | | Index index = nameToIndexes.get(indexID); |
| | | MatchingRuleIndex index = nameToIndexes.get(indexID); |
| | | if (!cfg.getIndexType().contains(indexType)) |
| | | { |
| | | removeIndex(txn, index, indexType); |
| | |
| | | |
| | | if (index == null) |
| | | { |
| | | index = newPresenceIndex(txn, cfg); |
| | | index = new MatchingRuleIndex(txn, cfg, PRESENCE_INDEXER); |
| | | openIndex(txn, index, ccr); |
| | | nameToIndexes.put(indexID, index); |
| | | } |
| | |
| | | { |
| | | return entryContainer.getDatabasePrefix() |
| | | + "_" |
| | | + indexConfig.getAttribute().getNameOrOID(); |
| | | + config.getAttribute().getNameOrOID(); |
| | | } |
| | | |
| | | /** |
| | |
| | | * |
| | | * @return The equality index. |
| | | */ |
| | | Index getEqualityIndex() |
| | | MatchingRuleIndex getEqualityIndex() |
| | | { |
| | | return getIndexById(IndexType.EQUALITY.toString()); |
| | | } |
| | |
| | | * |
| | | * @return The approximate index. |
| | | */ |
| | | Index getApproximateIndex() |
| | | MatchingRuleIndex getApproximateIndex() |
| | | { |
| | | return getIndexById(IndexType.APPROXIMATE.toString()); |
| | | } |
| | |
| | | * |
| | | * @return The ordering index. |
| | | */ |
| | | Index getOrderingIndex() |
| | | MatchingRuleIndex getOrderingIndex() |
| | | { |
| | | return getIndexById(IndexType.ORDERING.toString()); |
| | | } |
| | |
| | | * |
| | | * @return The substring index. |
| | | */ |
| | | Index getSubstringIndex() |
| | | MatchingRuleIndex getSubstringIndex() |
| | | { |
| | | return getIndexById(IndexType.SUBSTRING.toString()); |
| | | } |
| | |
| | | * |
| | | * @return The presence index. |
| | | */ |
| | | Index getPresenceIndex() |
| | | MatchingRuleIndex getPresenceIndex() |
| | | { |
| | | return getIndexById(IndexType.PRESENCE.toString()); |
| | | } |
| | |
| | | * @return The index identified by the provided identifier, or null if no such |
| | | * index exists |
| | | */ |
| | | Index getIndexById(String indexId) |
| | | MatchingRuleIndex getIndexById(String indexId) |
| | | { |
| | | return nameToIndexes.get(indexId); |
| | | } |
| | |
| | | * |
| | | * @return The map containing entries (extensible index type, list of indexes) |
| | | */ |
| | | Map<String, Collection<Index>> getExtensibleIndexes() |
| | | Map<String, Collection<MatchingRuleIndex>> getExtensibleIndexes() |
| | | { |
| | | return extensibleIndexesMapping; |
| | | } |
| | | |
| | | private Map<String, Collection<Index>> computeExtensibleIndexesMapping() |
| | | private Map<String, Collection<MatchingRuleIndex>> computeExtensibleIndexesMapping() |
| | | { |
| | | final Collection<Index> substring = new ArrayList<Index>(); |
| | | final Collection<Index> shared = new ArrayList<Index>(); |
| | | for (Map.Entry<String, Index> entry : nameToIndexes.entrySet()) |
| | | final Collection<MatchingRuleIndex> substring = new ArrayList<MatchingRuleIndex>(); |
| | | final Collection<MatchingRuleIndex> shared = new ArrayList<MatchingRuleIndex>(); |
| | | for (Map.Entry<String, MatchingRuleIndex> entry : nameToIndexes.entrySet()) |
| | | { |
| | | final String indexId = entry.getKey(); |
| | | if (isDefaultIndex(indexId)) { |
| | |
| | | shared.add(entry.getValue()); |
| | | } |
| | | } |
| | | final Map<String, Collection<Index>> indexMap = new HashMap<String,Collection<Index>>(); |
| | | final Map<String, Collection<MatchingRuleIndex>> indexMap = new HashMap<String, Collection<MatchingRuleIndex>>(); |
| | | indexMap.put(EXTENSIBLE_INDEXER_ID_SUBSTRING, substring); |
| | | indexMap.put(EXTENSIBLE_INDEXER_ID_SHARED, shared); |
| | | return Collections.unmodifiableMap(indexMap); |
| | |
| | | * 1. There is no matching rule provided |
| | | * 2. The matching rule specified is actually the default equality. |
| | | */ |
| | | MatchingRule eqRule = indexConfig.getAttribute().getEqualityMatchingRule(); |
| | | MatchingRule eqRule = config.getAttribute().getEqualityMatchingRule(); |
| | | if (matchRuleOID == null |
| | | || matchRuleOID.equals(eqRule.getOID()) |
| | | || matchRuleOID.equalsIgnoreCase(eqRule.getNameOrOID())) |
| | |
| | | } |
| | | |
| | | MatchingRule rule = DirectoryServer.getMatchingRule(matchRuleOID); |
| | | if (!ruleHasAtLeasOneIndex(rule)) |
| | | if (!ruleHasAtLeastOneIndex(rule)) |
| | | { |
| | | if (monitor.isFilterUseEnabled()) |
| | | { |
| | | monitor.updateStats(filter, INFO_JEB_INDEX_FILTER_MATCHING_RULE_NOT_INDEXED.get( |
| | | matchRuleOID, indexConfig.getAttribute().getNameOrOID())); |
| | | matchRuleOID, config.getAttribute().getNameOrOID())); |
| | | } |
| | | return IndexQuery.createNullIndexQuery().evaluate(null); |
| | | } |
| | |
| | | if (debugBuffer != null) |
| | | { |
| | | debugBuffer.append("[INDEX:"); |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers()) |
| | | for (Indexer indexer : rule.getIndexers()) |
| | | { |
| | | debugBuffer.append(" ") |
| | | .append(filter.getAttributeType().getNameOrOID()) |
| | |
| | | } |
| | | } |
| | | |
| | | private boolean ruleHasAtLeasOneIndex(MatchingRule rule) |
| | | private boolean ruleHasAtLeastOneIndex(MatchingRule rule) |
| | | { |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.getIndexers()) |
| | | for (Indexer indexer : rule.getIndexers()) |
| | | { |
| | | if (nameToIndexes.containsKey(indexer.getIndexID())) |
| | | { |
| | |
| | | return false; |
| | | } |
| | | |
| | | /** This class extends the IndexConfig for JE Backend. */ |
| | | private final class JEIndexConfig implements IndexingOptions |
| | | /** Indexing options implementation. */ |
| | | private final class IndexingOptionsImpl implements IndexingOptions |
| | | { |
| | | /** The length of the substring index. */ |
| | | private int substringLength; |
| | | /** The length of substring keys used in substring indexes. */ |
| | | private int substringKeySize; |
| | | |
| | | /** |
| | | * Creates a new JEIndexConfig instance. |
| | | * @param substringLength The length of the substring. |
| | | */ |
| | | private JEIndexConfig(int substringLength) |
| | | private IndexingOptionsImpl(int substringKeySize) |
| | | { |
| | | this.substringLength = substringLength; |
| | | this.substringKeySize = substringKeySize; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public int substringKeySize() |
| | | { |
| | | return substringLength; |
| | | return substringKeySize; |
| | | } |
| | | } |
| | | |
| | | void closeAndDelete(WriteableTransaction txn) |
| | | { |
| | | close(); |
| | | for (Index index : nameToIndexes.values()) |
| | | { |
| | | index.delete(txn); |
| | | state.deleteRecord(txn, index.getName()); |
| | | } |
| | | } |
| | | } |
| | |
| | | * for each entry. The key is the normalized entry DN and the value |
| | | * is the entry ID. |
| | | */ |
| | | class DN2ID extends DatabaseContainer |
| | | class DN2ID extends AbstractDatabaseContainer |
| | | { |
| | | private final int prefixRDNComponents; |
| | | |
| | |
DN2ID(TreeName treeName, EntryContainer entryContainer) throws StorageRuntimeException
{
  super(treeName);
  // Number of RDN components shared by every entry: the size of the backend base DN.
  this.prefixRDNComponents = entryContainer.getBaseDN().size();
}
| | | |
| | | /** |
| | |
| | | * as in the DN database so that all referrals in a subtree can be retrieved by |
| | | * cursoring through a range of the records. |
| | | */ |
| | | class DN2URI extends DatabaseContainer |
| | | class DN2URI extends AbstractDatabaseContainer |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | |
| | |
| | | * This class is a wrapper around the tree object and provides basic |
| | | * read and write methods for entries. |
| | | */ |
| | | abstract class DatabaseContainer |
| | | interface DatabaseContainer |
| | | { |
| | | /** The name of the database within the entryContainer. */ |
| | | private TreeName name; |
| | | |
| | | /** |
| | | * Create a new DatabaseContainer object. |
| | | * |
| | | * @param treeName The name of the entry database. |
| | | */ |
| | | DatabaseContainer(TreeName treeName) |
| | | { |
| | | this.name = treeName; |
| | | } |
| | | |
| | | /** |
| | | * Opens a database in this database container. If the provided database configuration is |
| | | * transactional, a transaction will be created and used to perform the open. |
| | |
| | | * @throws StorageRuntimeException |
| | | * if a database error occurs while opening the index. |
| | | */ |
| | | void open(WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | txn.openTree(name); |
| | | } |
| | | void open(WriteableTransaction txn) throws StorageRuntimeException; |
| | | |
| | | /** |
| | | * Deletes this database and all of its contents. |
| | |
| | | * @throws StorageRuntimeException |
| | | * if a database error occurs while deleting the index. |
| | | */ |
| | | void delete(WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | txn.deleteTree(name); |
| | | } |
| | | void delete(WriteableTransaction txn) throws StorageRuntimeException; |
| | | |
| | | /** |
| | | * Returns the number of key/value pairs in this database container. |
| | |
| | | * @throws StorageRuntimeException |
| | | * If an error occurs in the DB operation. |
| | | */ |
| | | long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | return txn.getRecordCount(name); |
| | | } |
| | | long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException; |
| | | |
| | | /** |
| | | * Get a string representation of this object. |
| | | * |
| | | * @return return A string representation of this object. |
| | | */ |
| | | @Override |
| | | public String toString() |
| | | { |
| | | return name.toString(); |
| | | } |
| | | String toString(); |
| | | |
| | | /** |
| | | * Get the database name for this database container. |
| | | * |
| | | * @return database name for this database container. |
| | | */ |
| | | final TreeName getName() |
| | | { |
| | | return name; |
| | | } |
| | | TreeName getName(); |
| | | |
| | | /** |
| | | * Set the database name to use for this container. |
| | | * |
| | | * @param name The database name to use for this container. |
| | | */ |
| | | final void setName(TreeName name) |
| | | { |
| | | this.name = name; |
| | | } |
| | | void setName(TreeName name); |
| | | } |
| | |
| | | AttributeBuilder needReindex = new AttributeBuilder("need-reindex"); |
| | | for(EntryContainer ec : rootContainer.getEntryContainers()) |
| | | { |
| | | List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>(); |
| | | ec.listDatabases(databases); |
| | | for(DatabaseContainer dc : databases) |
| | | for(DatabaseContainer dc : ec.listDatabases()) |
| | | { |
| | | if(dc instanceof Index && !((Index)dc).isTrusted()) |
| | | { |
| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Copyright 2006-2010 Sun Microsystems, Inc. |
| | | * Portions Copyright 2012-2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Reject.checkNotNull; |
| | | import static org.opends.messages.JebMessages.ERR_JEB_INDEX_CORRUPT_REQUIRES_REBUILD; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.*; |
| | | import static org.opends.server.backends.pluggable.State.IndexFlag.COMPACTED; |
| | | import static org.opends.server.backends.pluggable.State.IndexFlag.TRUSTED; |
| | | |
| | | import java.util.EnumSet; |
| | | |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.util.promise.NeverThrowsException; |
| | | import org.opends.server.backends.pluggable.CursorTransformer.ValueTransformer; |
| | | import org.opends.server.backends.pluggable.EntryIDSet.EntryIDSetCodec; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.UpdateFunction; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.util.StaticUtils; |
| | | |
| | | /** |
| | | * Represents an index implemented by a tree in which each key maps to a set of entry IDs. The key |
| | | * is a byte array, and is constructed from some normalized form of an attribute value (or fragment |
| | | * of a value) appearing in the entry. |
| | | */ |
| | | class DefaultIndex extends AbstractDatabaseContainer implements Index |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | |
| | | /** The limit on the number of entry IDs that may be indexed by one key. */ |
| | | private int indexEntryLimit; |
| | | /** |
| | | * Whether to maintain a count of IDs for a key once the entry limit has exceeded. |
| | | */ |
| | | private final boolean maintainCount; |
| | | |
| | | private final State state; |
| | | |
| | | private final EntryIDSetCodec codec; |
| | | |
| | | /** |
| | | * A flag to indicate if this index should be trusted to be consistent with the entries database. |
| | | * If not trusted, we assume that existing entryIDSets for a key is still accurate. However, keys |
| | | * that do not exist are undefined instead of an empty entryIDSet. The following rules will be |
| | | * observed when the index is not trusted: - no entryIDs will be added to a non-existing key. - |
| | | * undefined entryIdSet will be returned whenever a key is not found. |
| | | */ |
| | | private volatile boolean trusted; |
| | | |
| | | /** |
| | | * Create a new index object. |
| | | * |
| | | * @param name |
| | | * The name of the index database within the entryContainer. |
| | | * @param state |
| | | * The state database to persist index state info. |
| | | * @param indexEntryLimit |
| | | * The configured limit on the number of entry IDs that may be indexed by one key. |
| | | * @param maintainCount |
| | | * Whether to maintain a count of IDs for a key once the entry limit has exceeded. |
| | | * @param txn |
| | | * a non null database transaction |
| | | * @param entryContainer |
| | | * The database entryContainer holding this index. |
| | | * @throws StorageRuntimeException |
| | | * If an error occurs in the database. |
| | | */ |
| | | DefaultIndex(TreeName name, State state, int indexEntryLimit, boolean maintainCount, WriteableTransaction txn, |
| | | EntryContainer entryContainer) throws StorageRuntimeException |
| | | { |
| | | super(name); |
| | | this.indexEntryLimit = indexEntryLimit; |
| | | this.maintainCount = maintainCount; |
| | | this.state = state; |
| | | |
| | | final EnumSet<IndexFlag> flags = state.getIndexFlags(txn, getName()); |
| | | this.codec = flags.contains(COMPACTED) ? CODEC_V2 : CODEC_V1; |
| | | this.trusted = flags.contains(TRUSTED); |
| | | if (!trusted && entryContainer.getHighestEntryID(txn).longValue() == 0) |
| | | { |
| | | // If there are no entries in the entry container then there |
| | | // is no reason why this index can't be upgraded to trusted. |
| | | setTrusted(txn, true); |
| | | } |
| | | } |
| | | |
| | | public final Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn) |
| | | { |
| | | checkNotNull(txn, "txn must not be null"); |
| | | return CursorTransformer.transformValues(txn.openCursor(getName()), |
| | | new ValueTransformer<ByteString, ByteString, EntryIDSet, NeverThrowsException>() |
| | | { |
| | | @Override |
| | | public EntryIDSet transform(ByteString key, ByteString value) throws NeverThrowsException |
| | | { |
| | | return codec.decode(key, value); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | public final void importPut(WriteableTransaction txn, ImportIDSet idsToBeAdded) throws StorageRuntimeException |
| | | { |
| | | ByteSequence key = idsToBeAdded.getKey(); |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | importIDSet.merge(idsToBeAdded); |
| | | txn.put(getName(), key, importIDSet.valueToByteString(codec)); |
| | | } |
| | | else |
| | | { |
| | | txn.put(getName(), key, idsToBeAdded.valueToByteString(codec)); |
| | | } |
| | | } |
| | | |
| | | public final void importRemove(WriteableTransaction txn, ImportIDSet idsToBeRemoved) throws StorageRuntimeException |
| | | { |
| | | ByteSequence key = idsToBeRemoved.getKey(); |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | importIDSet.remove(idsToBeRemoved); |
| | | if (importIDSet.isDefined() && importIDSet.size() == 0) |
| | | { |
| | | txn.delete(getName(), key); |
| | | } |
| | | else |
| | | { |
| | | txn.put(getName(), key, importIDSet.valueToByteString(codec)); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | // Should never happen -- the keys should always be there. |
| | | throw new RuntimeException(); |
| | | } |
| | | } |
| | | |
| | | public final void update(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs) |
| | | throws StorageRuntimeException |
| | | { |
| | | /* |
| | | * Check the special condition where both deletedIDs and addedIDs are null. This is used when |
| | | * deleting entries and corresponding id2children and id2subtree records must be completely |
| | | * removed. |
| | | */ |
| | | if (deletedIDs == null && addedIDs == null) |
| | | { |
| | | boolean success = txn.delete(getName(), key); |
| | | if (success && logger.isTraceEnabled()) |
| | | { |
| | | StringBuilder builder = new StringBuilder(); |
| | | StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4); |
| | | logger.trace("The expected key does not exist in the index %s.\nKey:%s ", getName(), builder); |
| | | } |
| | | return; |
| | | } |
| | | |
| | | // Handle cases where nothing is changed early to avoid DB access. |
| | | if (isNullOrEmpty(deletedIDs) && isNullOrEmpty(addedIDs)) |
| | | { |
| | | return; |
| | | } |
| | | |
| | | if (maintainCount) |
| | | { |
| | | update0(txn, key, deletedIDs, addedIDs); |
| | | } |
| | | else if (get(txn, key).isDefined()) |
| | | { |
| | | /* |
| | | * Avoid taking a write lock on a record which has hit all IDs because it is likely to be a |
| | | * point of contention. |
| | | */ |
| | | update0(txn, key, deletedIDs, addedIDs); |
| | | } // else the record exists but we've hit all IDs. |
| | | } |
| | | |
| | | private boolean isNullOrEmpty(EntryIDSet entryIDSet) |
| | | { |
| | | return entryIDSet == null || entryIDSet.size() == 0; |
| | | } |
| | | |
| | | private boolean isNotEmpty(EntryIDSet entryIDSet) |
| | | { |
| | | return entryIDSet != null && entryIDSet.size() > 0; |
| | | } |
| | | |
| | | private void update0(final WriteableTransaction txn, final ByteString key, final EntryIDSet deletedIDs, |
| | | final EntryIDSet addedIDs) throws StorageRuntimeException |
| | | { |
| | | txn.update(getName(), key, new UpdateFunction() |
| | | { |
| | | @Override |
| | | public ByteSequence computeNewValue(final ByteSequence oldValue) |
| | | { |
| | | if (oldValue != null) |
| | | { |
| | | EntryIDSet entryIDSet = computeEntryIDSet(key, oldValue.toByteString(), deletedIDs, addedIDs); |
| | | ByteString after = codec.encode(entryIDSet); |
| | | /* |
| | | * If there are no more IDs then return null indicating that the record should be removed. |
| | | * If index is not trusted then this will cause all subsequent reads for this key to |
| | | * return undefined set. |
| | | */ |
| | | return after.isEmpty() ? null : after; |
| | | } |
| | | else if (trusted) |
| | | { |
| | | if (deletedIDs != null) |
| | | { |
| | | logIndexCorruptError(txn, key); |
| | | } |
| | | if (isNotEmpty(addedIDs)) |
| | | { |
| | | return codec.encode(addedIDs); |
| | | } |
| | | } |
| | | return null; // no change. |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private EntryIDSet computeEntryIDSet(ByteString key, ByteString value, EntryIDSet deletedIDs, EntryIDSet addedIDs) |
| | | { |
| | | EntryIDSet entryIDSet = codec.decode(key, value); |
| | | if (addedIDs != null) |
| | | { |
| | | if (entryIDSet.isDefined() && indexEntryLimit > 0) |
| | | { |
| | | long idCountDelta = addedIDs.size(); |
| | | if (deletedIDs != null) |
| | | { |
| | | idCountDelta -= deletedIDs.size(); |
| | | } |
| | | if (idCountDelta + entryIDSet.size() >= indexEntryLimit) |
| | | { |
| | | if (maintainCount) |
| | | { |
| | | entryIDSet = newUndefinedSetWithSize(key, entryIDSet.size() + idCountDelta); |
| | | } |
| | | else |
| | | { |
| | | entryIDSet = newUndefinedSet(); |
| | | } |
| | | |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | StringBuilder builder = new StringBuilder(); |
| | | StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4); |
| | | logger.trace("Index entry exceeded in index %s. " + "Limit: %d. ID list size: %d.\nKey:%s", getName(), |
| | | indexEntryLimit, idCountDelta + addedIDs.size(), builder); |
| | | |
| | | } |
| | | } |
| | | else |
| | | { |
| | | entryIDSet.addAll(addedIDs); |
| | | if (deletedIDs != null) |
| | | { |
| | | entryIDSet.removeAll(deletedIDs); |
| | | } |
| | | } |
| | | } |
| | | else |
| | | { |
| | | entryIDSet.addAll(addedIDs); |
| | | if (deletedIDs != null) |
| | | { |
| | | entryIDSet.removeAll(deletedIDs); |
| | | } |
| | | } |
| | | } |
| | | else if (deletedIDs != null) |
| | | { |
| | | entryIDSet.removeAll(deletedIDs); |
| | | } |
| | | return entryIDSet; |
| | | } |
| | | |
| | | private void logIndexCorruptError(WriteableTransaction txn, ByteString key) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | StringBuilder builder = new StringBuilder(); |
| | | StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4); |
| | | logger.trace("The expected key does not exist in the index %s.\nKey:%s", getName(), builder); |
| | | } |
| | | |
| | | setTrusted(txn, false); |
| | | logger.error(ERR_JEB_INDEX_CORRUPT_REQUIRES_REBUILD, getName()); |
| | | } |
| | | |
| | | public final EntryIDSet get(ReadableTransaction txn, ByteSequence key) |
| | | { |
| | | try |
| | | { |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | return codec.decode(key, value); |
| | | } |
| | | return trusted ? newDefinedSet() : newUndefinedSet(); |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | return newUndefinedSet(); |
| | | } |
| | | } |
| | | |
| | | public final boolean setIndexEntryLimit(int indexEntryLimit) |
| | | { |
| | | final boolean rebuildRequired = this.indexEntryLimit < indexEntryLimit; |
| | | this.indexEntryLimit = indexEntryLimit; |
| | | return rebuildRequired; |
| | | } |
| | | |
| | | public final int getIndexEntryLimit() |
| | | { |
| | | return indexEntryLimit; |
| | | } |
| | | |
| | | public final synchronized void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException |
| | | { |
| | | this.trusted = trusted; |
| | | if (trusted) |
| | | { |
| | | state.addFlagsToIndex(txn, getName(), TRUSTED); |
| | | } |
| | | else |
| | | { |
| | | state.removeFlagsFromIndex(txn, getName(), TRUSTED); |
| | | } |
| | | } |
| | | |
| | | public final boolean isTrusted() |
| | | { |
| | | return trusted; |
| | | } |
| | | |
| | | public final boolean getMaintainCount() |
| | | { |
| | | return maintainCount; |
| | | } |
| | | } |
| | |
| | | import org.opends.server.api.VirtualAttributeProvider; |
| | | import org.opends.server.api.plugin.PluginResult.SubordinateDelete; |
| | | import org.opends.server.api.plugin.PluginResult.SubordinateModifyDN; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadOperation; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | //Try creating all the indexes before confirming they are valid ones. |
| | | new AttributeIndex(cfg, EntryContainer.this, txn); |
| | | new AttributeIndex(cfg, state, EntryContainer.this, txn); |
| | | } |
| | | }); |
| | | return true; |
| | |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | final AttributeIndex index = new AttributeIndex(cfg, EntryContainer.this, txn); |
| | | final AttributeIndex index = new AttributeIndex(cfg, state, EntryContainer.this, txn); |
| | | index.open(txn); |
| | | if (!index.isTrusted()) |
| | | { |
| | |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | AttributeIndex index = attrIndexMap.get(cfg.getAttribute()); |
| | | deleteAttributeIndex(txn, index); |
| | | attrIndexMap.remove(cfg.getAttribute()); |
| | | attrIndexMap.remove(cfg.getAttribute()).closeAndDelete(txn); |
| | | } |
| | | }); |
| | | } |
| | |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | VLVIndex vlvIndex = vlvIndexMap.get(cfg.getName().toLowerCase()); |
| | | deleteDatabase(txn, vlvIndex); |
| | | vlvIndexMap.remove(cfg.getName()); |
| | | vlvIndexMap.remove(cfg.getName().toLowerCase()).closeAndDelete(txn); |
| | | } |
| | | }); |
| | | } |
| | |
| | | state = new State(getIndexName(STATE_DATABASE_NAME)); |
| | | state.open(txn); |
| | | |
| | | if (config.isSubordinateIndexesEnabled()) |
| | | { |
| | | openSubordinateIndexes(txn); |
| | | } |
| | | else |
| | | { |
| | | // Use a null index and ensure that future attempts to use the real |
| | | // subordinate indexes will fail. |
| | | id2children = openNewNullIndex(txn, ID2CHILDREN_DATABASE_NAME, new ID2CIndexer()); |
| | | id2subtree = openNewNullIndex(txn, ID2SUBTREE_DATABASE_NAME, new ID2SIndexer()); |
| | | logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, backend.getBackendID()); |
| | | } |
| | | openSubordinateIndexes(txn, config); |
| | | |
| | | dn2uri = new DN2URI(getIndexName(REFERRAL_DATABASE_NAME), this); |
| | | dn2uri.open(txn); |
| | |
| | | { |
| | | BackendIndexCfg indexCfg = config.getBackendIndex(idx); |
| | | |
| | | AttributeIndex index = new AttributeIndex(indexCfg, this, txn); |
| | | AttributeIndex index = new AttributeIndex(indexCfg, state, this, txn); |
| | | index.open(txn); |
| | | if(!index.isTrusted()) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | private NullIndex openNewNullIndex(WriteableTransaction txn, String indexId, Indexer indexer) |
| | | private NullIndex openNewNullIndex(WriteableTransaction txn, String name) |
| | | { |
| | | return new NullIndex(getIndexName(indexId), indexer, state, txn, this); |
| | | final TreeName treeName = getIndexName(name); |
| | | final NullIndex index = new NullIndex(treeName); |
| | | state.removeFlagsFromIndex(txn, treeName, IndexFlag.TRUSTED); |
| | | txn.deleteTree(treeName); |
| | | return index; |
| | | } |
| | | |
| | | /** |
| | |
| | | if (entryID != null) |
| | | { |
| | | final Index index = subtree ? id2subtree : id2children; |
| | | final EntryIDSet entryIDSet = index.read(txn, entryID.toByteString()); |
| | | final EntryIDSet entryIDSet = index.get(txn, entryID.toByteString()); |
| | | long count = entryIDSet.size(); |
| | | if (count != Long.MAX_VALUE) |
| | | { |
| | |
| | | EntryIDSet scopeSet; |
| | | if (searchScope == SearchScope.SINGLE_LEVEL) |
| | | { |
| | | scopeSet = id2children.read(txn, baseIDData); |
| | | scopeSet = id2children.get(txn, baseIDData); |
| | | } |
| | | else |
| | | { |
| | | scopeSet = id2subtree.read(txn, baseIDData); |
| | | scopeSet = id2subtree.get(txn, baseIDData); |
| | | if (searchScope == SearchScope.WHOLE_SUBTREE) |
| | | { |
| | | // The id2subtree list does not include the base entry ID. |
| | |
| | | if (parentDN != null) |
| | | { |
| | | final ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | id2children.insertID(indexBuffer, parentIDKeyBytes, entryID); |
| | | id2subtree.insertID(indexBuffer, parentIDKeyBytes, entryID); |
| | | indexBuffer.put(id2children, parentIDKeyBytes, entryID); |
| | | indexBuffer.put(id2subtree, parentIDKeyBytes, entryID); |
| | | |
| | | // Iterate up through the superior entries, starting above the |
| | | // parent. |
| | |
| | | } |
| | | |
| | | // Insert into id2subtree for this node. |
| | | id2subtree.insertID(indexBuffer, nodeID.toByteString(), entryID); |
| | | indexBuffer.put(id2subtree, nodeID.toByteString(), entryID); |
| | | } |
| | | } |
| | | indexBuffer.flush(txn); |
| | |
| | | |
| | | // Remove the id2c and id2s records for this entry. |
| | | final ByteString leafIDKeyBytes = leafID.toByteString(); |
| | | id2children.delete(indexBuffer, leafIDKeyBytes); |
| | | id2subtree.delete(indexBuffer, leafIDKeyBytes); |
| | | indexBuffer.remove(id2children, leafIDKeyBytes); |
| | | indexBuffer.remove(id2subtree, leafIDKeyBytes); |
| | | |
| | | // Iterate up through the superior entries from the target entry. |
| | | boolean isParent = true; |
| | |
| | | // Remove from id2children. |
| | | if (isParent) |
| | | { |
| | | id2children.removeID(indexBuffer, parentIDBytes, leafID); |
| | | indexBuffer.remove(id2children, parentIDBytes, leafID); |
| | | isParent = false; |
| | | } |
| | | id2subtree.removeID(indexBuffer, parentIDBytes, leafID); |
| | | indexBuffer.remove(id2subtree, parentIDBytes, leafID); |
| | | } |
| | | |
| | | // Remove the entry from the entry cache. |
| | |
| | | ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | if(isParent) |
| | | { |
| | | id2children.insertID(buffer, parentIDKeyBytes, newID); |
| | | buffer.put(id2children, parentIDKeyBytes, newID); |
| | | isParent = false; |
| | | } |
| | | id2subtree.insertID(buffer, parentIDKeyBytes, newID); |
| | | buffer.put(id2subtree, parentIDKeyBytes, newID); |
| | | } |
| | | } |
| | | } |
| | |
| | | ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | if(isParent) |
| | | { |
| | | id2children.removeID(buffer, parentIDKeyBytes, oldID); |
| | | buffer.remove(id2children, parentIDKeyBytes, oldID); |
| | | isParent = false; |
| | | } |
| | | id2subtree.removeID(buffer, parentIDKeyBytes, oldID); |
| | | buffer.remove(id2subtree, parentIDKeyBytes, oldID); |
| | | } |
| | | } |
| | | |
| | |
| | | // All the subordinates will be renumbered so we have to rebuild |
| | | // id2c and id2s with the new ID. |
| | | ByteString oldIDKeyBytes = oldID.toByteString(); |
| | | id2children.delete(buffer, oldIDKeyBytes); |
| | | id2subtree.delete(buffer, oldIDKeyBytes); |
| | | buffer.remove(id2children, oldIDKeyBytes); |
| | | buffer.remove(id2subtree, oldIDKeyBytes); |
| | | |
| | | // Reindex the entry with the new ID. |
| | | indexRemoveEntry(buffer, oldEntry, oldID); |
| | |
| | | { |
| | | EntryID parentID = dn2id.get(txn, dn); |
| | | ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | id2subtree.removeID(buffer, parentIDKeyBytes, oldID); |
| | | buffer.remove(id2subtree, parentIDKeyBytes, oldID); |
| | | } |
| | | } |
| | | |
| | |
| | | // All the subordinates will be renumbered so we have to rebuild |
| | | // id2c and id2s with the new ID. |
| | | ByteString oldIDKeyBytes = oldID.toByteString(); |
| | | id2children.delete(buffer, oldIDKeyBytes); |
| | | id2subtree.delete(buffer, oldIDKeyBytes); |
| | | buffer.remove(id2children, oldIDKeyBytes); |
| | | buffer.remove(id2subtree, oldIDKeyBytes); |
| | | |
| | | // Reindex the entry with the new ID. |
| | | indexRemoveEntry(buffer, oldEntry, oldID); |
| | |
| | | final EntryID entryID = dn2id.get(txn, baseDN); |
| | | if (entryID != null) |
| | | { |
| | | final EntryIDSet entryIDSet = id2subtree.read(txn, entryID.toByteString()); |
| | | final EntryIDSet entryIDSet = id2subtree.get(txn, entryID.toByteString()); |
| | | long count = entryIDSet.size(); |
| | | if(count != Long.MAX_VALUE) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Get a list of the databases opened by the entryContainer. |
| | | * @param dbList A list of database containers. |
| | | */ |
| | | void listDatabases(List<DatabaseContainer> dbList) |
| | | { |
| | | dbList.add(dn2id); |
| | | dbList.add(id2entry); |
| | | dbList.add(dn2uri); |
| | | if (config.isSubordinateIndexesEnabled()) |
| | | { |
| | | dbList.add(id2children); |
| | | dbList.add(id2subtree); |
| | | } |
| | | dbList.add(state); |
| | | |
| | | for(AttributeIndex index : attrIndexMap.values()) |
| | | { |
| | | index.listDatabases(dbList); |
| | | } |
| | | |
| | | dbList.addAll(vlvIndexMap.values()); |
| | | } |
| | | |
| | | /** |
| | | * Determine whether the provided operation has the ManageDsaIT request |
| | | * control. |
| | |
| | | */ |
| | | void delete(WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>(); |
| | | listDatabases(databases); |
| | | |
| | | for (DatabaseContainer db : databases) |
| | | for (DatabaseContainer db : listDatabases()) |
| | | { |
| | | db.delete(txn); |
| | | } |
| | |
| | | } |
| | | |
| | | /** |
| | | * Removes a attribute index from disk. |
| | | * |
| | | * @param attributeIndex The attribute index to remove. |
| | | * @throws StorageRuntimeException If an database error occurs while attempting |
| | | * to delete the index. |
| | | */ |
| | | private void deleteAttributeIndex(WriteableTransaction txn, AttributeIndex attributeIndex) |
| | | throws StorageRuntimeException |
| | | { |
| | | attributeIndex.close(); |
| | | for (Index index : attributeIndex.getAllIndexes()) |
| | | { |
| | | index.delete(txn); |
| | | state.deleteRecord(txn, index.getName()); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * This method constructs a container name from a base DN. Only alphanumeric |
| | | * characters are preserved, all other characters are replaced with an |
| | | * underscore. |
| | |
| | | */ |
| | | void setDatabasePrefix(final String newBaseDN) throws StorageRuntimeException |
| | | { |
| | | final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>(); |
| | | listDatabases(databases); |
| | | final List<DatabaseContainer> databases = listDatabases(); |
| | | try |
| | | { |
| | | // Rename in transaction. |
| | |
| | | { |
| | | if (config.isSubordinateIndexesEnabled() != cfg.isSubordinateIndexesEnabled()) |
| | | { |
| | | if (cfg.isSubordinateIndexesEnabled()) |
| | | { |
| | | // Re-enabling subordinate indexes. |
| | | openSubordinateIndexes(txn); |
| | | } |
| | | else |
| | | { |
| | | // Disabling subordinate indexes. Use a null index and ensure that |
| | | // future attempts to use the real indexes will fail. |
| | | id2children = openNewNullIndex(txn, ID2CHILDREN_DATABASE_NAME, new ID2CIndexer()); |
| | | id2subtree = openNewNullIndex(txn, ID2SUBTREE_DATABASE_NAME, new ID2SIndexer()); |
| | | logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, cfg.getBackendId()); |
| | | } |
| | | openSubordinateIndexes(txn, cfg); |
| | | } |
| | | |
| | | if (config.getIndexEntryLimit() != cfg.getIndexEntryLimit()) |
| | |
| | | |
| | | private void clear0(WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>(); |
| | | listDatabases(databases); |
| | | final List<DatabaseContainer> databases = listDatabases(); |
| | | try |
| | | { |
| | | for (DatabaseContainer db : databases) |
| | |
| | | } |
| | | } |
| | | |
| | | List<DatabaseContainer> listDatabases() |
| | | { |
| | | final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>(); |
| | | databases.add(dn2id); |
| | | databases.add(id2entry); |
| | | databases.add(dn2uri); |
| | | if (config.isSubordinateIndexesEnabled()) |
| | | { |
| | | databases.add(id2children); |
| | | databases.add(id2subtree); |
| | | } |
| | | databases.add(state); |
| | | |
| | | for (AttributeIndex index : attrIndexMap.values()) |
| | | { |
| | | index.listDatabases(databases); |
| | | } |
| | | |
| | | databases.addAll(vlvIndexMap.values()); |
| | | return databases; |
| | | } |
| | | |
| | | /** |
| | | * Clear the contents for a database from disk. |
| | | * |
| | |
| | | } |
| | | |
| | | /** Opens the id2children and id2subtree indexes. */ |
| | | private void openSubordinateIndexes(WriteableTransaction txn) |
| | | private void openSubordinateIndexes(WriteableTransaction txn, PluggableBackendCfg cfg) |
| | | { |
| | | id2children = newIndex(txn, ID2CHILDREN_DATABASE_NAME, new ID2CIndexer()); |
| | | id2subtree = newIndex(txn, ID2SUBTREE_DATABASE_NAME, new ID2SIndexer()); |
| | | } |
| | | |
| | | private Index newIndex(WriteableTransaction txn, String name, Indexer indexer) |
| | | { |
| | | final Index index = new Index(getIndexName(name), indexer, state, config.getIndexEntryLimit(), 0, true, txn, this); |
| | | index.open(txn); |
| | | if (!index.isTrusted()) |
| | | if (cfg.isSubordinateIndexesEnabled()) |
| | | { |
| | | logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, index.getName()); |
| | | } |
| | | return index; |
| | | } |
| | | TreeName name = getIndexName(ID2CHILDREN_DATABASE_NAME); |
| | | id2children = new DefaultIndex(name, state, config.getIndexEntryLimit(), true, txn, this); |
| | | id2children.open(txn); |
| | | if (!id2children.isTrusted()) |
| | | { |
| | | logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, name); |
| | | } |
| | | |
| | | /** |
| | | * Creates a new index for an attribute. |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @param indexName the name to give to the new index |
| | | * @param indexer the indexer to use when inserting data into the index |
| | | * @param indexEntryLimit the index entry limit |
| | | * @return a new index |
| | | */ |
| | | Index newIndexForAttribute(WriteableTransaction txn, TreeName indexName, Indexer indexer, int indexEntryLimit) |
| | | { |
| | | return new Index(indexName, indexer, state, indexEntryLimit, CURSOR_ENTRY_LIMIT, false, txn, this); |
| | | name = getIndexName(ID2SUBTREE_DATABASE_NAME); |
| | | id2subtree = new DefaultIndex(name, state, config.getIndexEntryLimit(), true, txn, this); |
| | | id2subtree.open(txn); |
| | | if (!id2subtree.isTrusted()) |
| | | { |
| | | logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, name); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | // Disabling subordinate indexes. Use a null index and ensure that |
| | | // future attempts to use the real indexes will fail. |
| | | id2children = openNewNullIndex(txn, ID2CHILDREN_DATABASE_NAME); |
| | | id2subtree = openNewNullIndex(txn, ID2SUBTREE_DATABASE_NAME); |
| | | logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, cfg.getBackendId()); |
| | | } |
| | | } |
| | | |
| | | |
| | |
| | | * Represents a set of Entry IDs. It can represent a set where the IDs are not defined, for example when the index entry |
| | | * limit has been exceeded. |
| | | */ |
| | | @SuppressWarnings("javadoc") |
| | | final class EntryIDSet implements Iterable<EntryID> |
| | | { |
| | | public static final EntryIDSetCodec CODEC_V1 = new EntryIDSetCodecV1(); |
| | |
| | | * Represents the database containing the LDAP entries. The database key is |
| | | * the entry ID and the value is the entry contents. |
| | | */ |
| | | class ID2Entry extends DatabaseContainer |
| | | class ID2Entry extends AbstractDatabaseContainer |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | |
| | |
| | | import org.opends.server.backends.RebuildConfig; |
| | | import org.opends.server.backends.RebuildConfig.RebuildMode; |
| | | import org.opends.server.backends.persistit.PersistItStorage; |
| | | import org.opends.server.backends.pluggable.AttributeIndex.MatchingRuleIndex; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.Storage; |
| | |
| | | putInIdContainerMap(attributeIndex.getSubstringIndex()); |
| | | putInIdContainerMap(attributeIndex.getOrderingIndex()); |
| | | putInIdContainerMap(attributeIndex.getApproximateIndex()); |
| | | Map<String, Collection<Index>> extensibleMap = attributeIndex.getExtensibleIndexes(); |
| | | Map<String, Collection<MatchingRuleIndex>> extensibleMap = attributeIndex.getExtensibleIndexes(); |
| | | if (!extensibleMap.isEmpty()) |
| | | { |
| | | putInIdContainerMap(extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING)); |
| | |
| | | } |
| | | } |
| | | |
| | | private void putInIdContainerMap(Collection<Index> indexes) |
| | | private void putInIdContainerMap(Collection<MatchingRuleIndex> indexes) |
| | | { |
| | | if (indexes != null) |
| | | { |
| | |
| | | importCount.getAndIncrement(); |
| | | } |
| | | |
| | | void processAllIndexes(Suffix suffix, Entry entry, EntryID entryID) |
| | | throws DirectoryException, StorageRuntimeException, InterruptedException |
| | | void processAllIndexes(Suffix suffix, Entry entry, EntryID entryID) throws StorageRuntimeException, |
| | | InterruptedException |
| | | { |
| | | for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix.getAttrIndexMap().entrySet()) |
| | | { |
| | |
| | | } |
| | | |
| | | @Override |
| | | void processAttribute(Index index, Entry entry, EntryID entryID, IndexingOptions options, |
| | | void processAttribute(MatchingRuleIndex index, Entry entry, EntryID entryID, IndexingOptions options, |
| | | IndexKey indexKey) throws StorageRuntimeException, InterruptedException |
| | | { |
| | | if (oldEntry != null) |
| | |
| | | } |
| | | |
| | | /** Examine the DN for duplicates and missing parents. */ |
| | | @SuppressWarnings("javadoc") |
| | | boolean dnSanityCheck(DN entryDN, Entry entry, Suffix suffix) |
| | | throws StorageRuntimeException, InterruptedException |
| | | { |
| | |
| | | return true; |
| | | } |
| | | |
| | | void processIndexes(Suffix suffix, Entry entry, EntryID entryID) |
| | | throws DirectoryException, StorageRuntimeException, InterruptedException |
| | | void processIndexes(Suffix suffix, Entry entry, EntryID entryID) throws StorageRuntimeException, |
| | | InterruptedException |
| | | { |
| | | for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix.getAttrIndexMap().entrySet()) |
| | | { |
| | |
| | | } |
| | | |
| | | void fillIndexKey(AttributeIndex attrIndex, Entry entry, AttributeType attrType, EntryID entryID) |
| | | throws InterruptedException, DirectoryException, StorageRuntimeException |
| | | throws InterruptedException, StorageRuntimeException |
| | | { |
| | | final IndexingOptions options = attrIndex.getIndexingOptions(); |
| | | |
| | |
| | | processAttribute(attrIndex.getOrderingIndex(), ImportIndexType.ORDERING, entry, attrType, entryID, options); |
| | | processAttribute(attrIndex.getApproximateIndex(), ImportIndexType.APPROXIMATE, entry, attrType, entryID, options); |
| | | |
| | | Map<String, Collection<Index>> extensibleMap = attrIndex.getExtensibleIndexes(); |
| | | Map<String, Collection<MatchingRuleIndex>> extensibleMap = attrIndex.getExtensibleIndexes(); |
| | | if (!extensibleMap.isEmpty()) |
| | | { |
| | | Collection<Index> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | Collection<MatchingRuleIndex> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | processAttributes(subIndexes, ImportIndexType.EX_SUBSTRING, entry, attrType, entryID, options); |
| | | Collection<Index> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED); |
| | | Collection<MatchingRuleIndex> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED); |
| | | processAttributes(sharedIndexes, ImportIndexType.EX_SHARED, entry, attrType, entryID, options); |
| | | } |
| | | } |
| | |
| | | buffer.flush(txn); |
| | | } |
| | | |
| | | private void processAttributes(Collection<Index> indexes, ImportIndexType indexType, Entry entry, |
| | | private void processAttributes(Collection<MatchingRuleIndex> indexes, ImportIndexType indexType, Entry entry, |
| | | AttributeType attributeType, EntryID entryID, IndexingOptions options) throws InterruptedException |
| | | { |
| | | if (indexes != null) |
| | | { |
| | | for (Index index : indexes) |
| | | for (MatchingRuleIndex index : indexes) |
| | | { |
| | | processAttribute(index, indexType, entry, attributeType, entryID, options); |
| | | } |
| | | } |
| | | } |
| | | |
| | | private void processAttribute(Index index, ImportIndexType indexType, Entry entry, |
| | | private void processAttribute(MatchingRuleIndex index, ImportIndexType indexType, Entry entry, |
| | | AttributeType attributeType, EntryID entryID, IndexingOptions options) throws InterruptedException |
| | | { |
| | | if (index != null) |
| | |
| | | } |
| | | } |
| | | |
| | | void processAttribute(Index index, Entry entry, EntryID entryID, IndexingOptions options, |
| | | void processAttribute(MatchingRuleIndex index, Entry entry, EntryID entryID, IndexingOptions options, |
| | | IndexKey indexKey) throws StorageRuntimeException, InterruptedException |
| | | { |
| | | insertKeySet.clear(); |
| | |
| | | if (deleteSet.size() > 0 || !deleteSet.isDefined()) |
| | | { |
| | | final Index index = indexIDToIndexMap.get(indexID); |
| | | index.delete(txn, deleteSet); |
| | | index.importRemove(txn, deleteSet); |
| | | } |
| | | if (insertSet.size() > 0 || !insertSet.isDefined()) |
| | | { |
| | | final Index index = indexIDToIndexMap.get(indexID); |
| | | index.insert(txn, insertSet); |
| | | index.importPut(txn, insertSet); |
| | | } |
| | | } |
| | | } |
| | |
| | | { |
| | | for (ImportIDSet idSet : map.values()) |
| | | { |
| | | index.insert(txn, idSet); |
| | | index.importPut(txn, idSet); |
| | | } |
| | | if (clearMap) |
| | | { |
| | |
| | | private final PluggableBackendCfg cfg; |
| | | |
| | | /** Map of index keys to indexes. */ |
| | | private final Map<IndexKey, Index> indexMap = |
| | | new LinkedHashMap<IndexKey, Index>(); |
| | | private final Map<IndexKey, MatchingRuleIndex> indexMap = |
| | | new LinkedHashMap<IndexKey, MatchingRuleIndex>(); |
| | | |
| | | /** Map of index keys to extensible indexes. */ |
| | | private final Map<IndexKey, Collection<Index>> extensibleIndexMap = |
| | | new LinkedHashMap<IndexKey, Collection<Index>>(); |
| | | private final Map<IndexKey, Collection<MatchingRuleIndex>> extensibleIndexMap = |
| | | new LinkedHashMap<IndexKey, Collection<MatchingRuleIndex>>(); |
| | | |
| | | /** List of VLV indexes. */ |
| | | private final List<VLVIndex> vlvIndexes = new LinkedList<VLVIndex>(); |
| | |
| | | fillIndexMap(txn, attrType, attrIndex.getPresenceIndex(), ImportIndexType.PRESENCE, onlyDegraded); |
| | | fillIndexMap(txn, attrType, attrIndex.getApproximateIndex(), ImportIndexType.APPROXIMATE, onlyDegraded); |
| | | |
| | | final Map<String, Collection<Index>> extensibleMap = attrIndex.getExtensibleIndexes(); |
| | | final Map<String, Collection<MatchingRuleIndex>> extensibleMap = attrIndex.getExtensibleIndexes(); |
| | | if (!extensibleMap.isEmpty()) |
| | | { |
| | | final Collection<Index> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | final Collection<MatchingRuleIndex> subIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | fillIndexMap(txn, attrType, subIndexes, ImportIndexType.EX_SUBSTRING, onlyDegraded); |
| | | final Collection<Index> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED); |
| | | final Collection<MatchingRuleIndex> sharedIndexes = extensibleMap.get(EXTENSIBLE_INDEXER_ID_SHARED); |
| | | fillIndexMap(txn, attrType, sharedIndexes, ImportIndexType.EX_SHARED, onlyDegraded); |
| | | } |
| | | } |
| | | |
| | | private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, Collection<Index> indexes, |
| | | private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, Collection<MatchingRuleIndex> indexes, |
| | | ImportIndexType importIndexType, boolean onlyDegraded) |
| | | { |
| | | if (indexes != null && !indexes.isEmpty()) |
| | | { |
| | | final List<Index> mutableCopy = new LinkedList<Index>(indexes); |
| | | for (final Iterator<Index> it = mutableCopy.iterator(); it.hasNext();) |
| | | final List<MatchingRuleIndex> mutableCopy = new LinkedList<MatchingRuleIndex>(indexes); |
| | | for (final Iterator<MatchingRuleIndex> it = mutableCopy.iterator(); it.hasNext();) |
| | | { |
| | | final Index index = it.next(); |
| | | if (!onlyDegraded || !index.isTrusted()) |
| | |
| | | } |
| | | } |
| | | |
| | | private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, Index index, |
| | | private void fillIndexMap(WriteableTransaction txn, AttributeType attrType, MatchingRuleIndex index, |
| | | ImportIndexType importIndexType, boolean onlyDegraded) |
| | | { |
| | | if (index != null |
| | | && (!onlyDegraded || !index.isTrusted()) |
| | | if (index != null && (!onlyDegraded || !index.isTrusted()) |
| | | && (!rebuildConfig.isClearDegradedState() || index.getRecordCount(txn) == 0)) |
| | | { |
| | | putInIdContainerMap(index); |
| | |
| | | |
| | | if (!extensibleIndexMap.isEmpty()) |
| | | { |
| | | for (final Collection<Index> subIndexes : extensibleIndexMap.values()) |
| | | for (final Collection<MatchingRuleIndex> subIndexes : extensibleIndexMap.values()) |
| | | { |
| | | if (subIndexes != null) |
| | | { |
| | |
| | | } |
| | | if (!extensibleIndexMap.isEmpty()) |
| | | { |
| | | for (Collection<Index> subIndexes : extensibleIndexMap.values()) |
| | | for (Collection<MatchingRuleIndex> subIndexes : extensibleIndexMap.values()) |
| | | { |
| | | setTrusted(txn, subIndexes, trusted); |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | private void setTrusted(WriteableTransaction txn, final Collection<Index> indexes, boolean trusted) |
| | | private void setTrusted(WriteableTransaction txn, final Collection<MatchingRuleIndex> indexes, boolean trusted) |
| | | { |
| | | if (indexes != null && !indexes.isEmpty()) |
| | | { |
| | |
| | | private void processExtensibleIndexes(Entry entry, EntryID entryID) |
| | | throws InterruptedException |
| | | { |
| | | for (Map.Entry<IndexKey, Collection<Index>> mapEntry : |
| | | for (Map.Entry<IndexKey, Collection<MatchingRuleIndex>> mapEntry : |
| | | this.extensibleIndexMap.entrySet()) |
| | | { |
| | | IndexKey key = mapEntry.getKey(); |
| | |
| | | { |
| | | AttributeIndex attributeIndex = entryContainer.getAttributeIndex(attrType); |
| | | IndexingOptions options = attributeIndex.getIndexingOptions(); |
| | | for (Index index : mapEntry.getValue()) |
| | | for (MatchingRuleIndex index : mapEntry.getValue()) |
| | | { |
| | | processAttribute(index, entry, entryID, options, key); |
| | | } |
| | |
| | | private void processIndexes(Entry entry, EntryID entryID) |
| | | throws StorageRuntimeException, InterruptedException |
| | | { |
| | | for (Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet()) |
| | | for (Map.Entry<IndexKey, MatchingRuleIndex> mapEntry : indexMap.entrySet()) |
| | | { |
| | | IndexKey key = mapEntry.getKey(); |
| | | AttributeType attrType = key.getAttributeType(); |
| | |
| | | { |
| | | AttributeIndex attributeIndex = entryContainer.getAttributeIndex(attrType); |
| | | IndexingOptions options = attributeIndex.getIndexingOptions(); |
| | | Index index = mapEntry.getValue(); |
| | | MatchingRuleIndex index = mapEntry.getValue(); |
| | | processAttribute(index, entry, entryID, options, key); |
| | | } |
| | | } |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.*; |
| | | import static org.opends.server.backends.pluggable.State.IndexFlag.*; |
| | | |
| | | import java.util.ArrayList; |
| | | import java.util.EnumSet; |
| | | import java.util.HashSet; |
| | | import java.util.List; |
| | | import java.util.Map; |
| | | import java.util.Set; |
| | | import java.util.TreeMap; |
| | | |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ConditionResult; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.forgerock.util.promise.NeverThrowsException; |
| | | import org.opends.server.backends.pluggable.CursorTransformer.ValueTransformer; |
| | | import org.opends.server.backends.pluggable.EntryIDSet.EntryIDSetCodec; |
| | | import org.opends.server.backends.pluggable.IndexBuffer.BufferedIndexValues; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.UpdateFunction; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.types.Entry; |
| | | import org.opends.server.types.Modification; |
| | | import org.opends.server.util.StaticUtils; |
| | | |
| | | /** |
| | | * Represents an index implemented by a tree in which each key maps to |
| | | * a set of entry IDs. The key is a byte array, and is constructed from some |
| | | * normalized form of an attribute value (or fragment of a value) appearing |
| | | * in the entry. |
| | | * Represents an index implemented by a tree in which each key maps to a set of entry IDs. The key |
| | | * is a byte array, and is constructed from some normalized form of an attribute value (or fragment |
| | | * of a value) appearing in the entry. |
| | | */ |
| | | class Index extends DatabaseContainer |
| | | interface Index extends DatabaseContainer |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | EntryIDSet get(ReadableTransaction txn, ByteSequence key); |
| | | |
| | | /** The indexer object to construct index keys from LDAP attribute values. */ |
| | | private Indexer indexer; |
| | | int getIndexEntryLimit(); |
| | | |
| | | /** The limit on the number of entry IDs that may be indexed by one key. */ |
| | | private int indexEntryLimit; |
| | | /** |
| | | * Limit on the number of entry IDs that may be retrieved by cursoring |
| | | * through an index. |
| | | */ |
| | | private final int cursorEntryLimit; |
| | | /** |
| | | * Number of keys that have exceeded the entry limit since this |
| | | * object was created. |
| | | */ |
| | | private int entryLimitExceededCount; |
| | | boolean getMaintainCount(); |
| | | |
| | | /** |
| | | * Whether to maintain a count of IDs for a key once the entry limit |
| | | * has exceeded. |
| | | */ |
| | | private final boolean maintainCount; |
| | | // Ignores trusted state. |
| | | void importPut(WriteableTransaction txn, ImportIDSet idsToBeAdded); |
| | | |
| | | private final State state; |
| | | // Ignores trusted state. |
| | | void importRemove(WriteableTransaction txn, ImportIDSet idsToBeRemoved); |
| | | |
| | | private final EntryIDSetCodec codec; |
| | | boolean isTrusted(); |
| | | |
| | | /** |
| | | * A flag to indicate if this index should be trusted to be consistent |
| | | * with the entries database. If not trusted, we assume that existing |
| | | * entryIDSets for a key is still accurate. However, keys that do not |
| | | * exist are undefined instead of an empty entryIDSet. The following |
| | | * rules will be observed when the index is not trusted: |
| | | * |
| | | * - no entryIDs will be added to a non-existing key. |
| | | * - undefined entryIdSet will be returned whenever a key is not found. |
| | | */ |
| | | private boolean trusted; |
| | | Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn); |
| | | |
| | | /** |
| | | * Create a new index object. |
| | | * @param name The name of the index database within the entryContainer. |
| | | * @param indexer The indexer object to construct index keys from LDAP |
| | | * attribute values. |
| | | * @param state The state database to persist index state info. |
| | | * @param indexEntryLimit The configured limit on the number of entry IDs |
| | | * that may be indexed by one key. |
| | | * @param cursorEntryLimit The configured limit on the number of entry IDs |
| | | * @param maintainCount Whether to maintain a count of IDs for a key once |
| | | * the entry limit has exceeded. |
| | | * @param txn a non null database transaction |
| | | * @param entryContainer The database entryContainer holding this index. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | Index(TreeName name, Indexer indexer, State state, int indexEntryLimit, int cursorEntryLimit, boolean maintainCount, |
| | | WriteableTransaction txn, EntryContainer entryContainer) throws StorageRuntimeException |
| | | { |
| | | super(name); |
| | | this.indexer = indexer; |
| | | this.indexEntryLimit = indexEntryLimit; |
| | | this.cursorEntryLimit = cursorEntryLimit; |
| | | this.maintainCount = maintainCount; |
| | | this.state = state; |
| | | boolean setIndexEntryLimit(int indexEntryLimit); |
| | | |
| | | final EnumSet<IndexFlag> flags = state.getIndexFlags(txn, getName()); |
| | | this.codec = flags.contains(COMPACTED) ? CODEC_V2 : CODEC_V1; |
| | | this.trusted = flags.contains(TRUSTED); |
| | | if (!trusted && entryContainer.getHighestEntryID(txn).longValue() == 0) |
| | | { |
| | | // If there are no entries in the entry container then there |
| | | // is no reason why this index can't be upgraded to trusted. |
| | | setTrusted(txn, true); |
| | | } |
| | | } |
| | | void setTrusted(WriteableTransaction txn, boolean trusted); |
| | | |
  /**
   * Computes the set of index keys for the given entry by delegating to this
   * index's indexer.
   *
   * @param entry   the entry to generate keys for
   * @param keys    output set receiving the generated keys
   * @param options the indexing options passed through to the indexer
   */
  void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options)
  {
    indexer.indexEntry(entry, keys, options);
  }
| | | |
  /**
   * Buffers an insertion of the given entry ID under the given key; nothing is
   * written to storage until the index buffer is flushed.
   */
  final void insertID(IndexBuffer buffer, ByteString keyBytes, EntryID entryID)
  {
    getBufferedIndexValues(buffer, keyBytes).addEntryID(keyBytes, entryID);
  }
| | | |
  /**
   * Opens a cursor over this index whose values are decoded on the fly from
   * their stored byte form into {@link EntryIDSet}s.
   *
   * @param txn a non null database transaction
   * @return a cursor yielding decoded entry ID sets; the caller must close it
   */
  final Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn) {
    checkNotNull(txn, "txn must not be null");
    return CursorTransformer.transformValues(txn.openCursor(getName()),
        new ValueTransformer<ByteString, ByteString, EntryIDSet, NeverThrowsException>()
        {
          @Override
          public EntryIDSet transform(ByteString key, ByteString value) throws NeverThrowsException
          {
            // Decode using this index's codec (compacted or legacy format).
            return codec.decode(key, value);
          }
        });
  }
| | | |
| | | /** |
| | | * Delete the specified import ID set from the import ID set associated with the key. |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @param importIdSet The import ID set to delete. |
| | | * @throws StorageRuntimeException If a database error occurs. |
| | | */ |
| | | final void delete(WriteableTransaction txn, ImportIDSet importIdSet) throws StorageRuntimeException |
| | | { |
| | | ByteSequence key = importIdSet.getKey(); |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | importIDSet.remove(importIdSet); |
| | | if (importIDSet.isDefined() && importIDSet.size() == 0) |
| | | { |
| | | txn.delete(getName(), key); |
| | | } |
| | | else |
| | | { |
| | | value = importIDSet.valueToByteString(codec); |
| | | txn.put(getName(), key, value); |
| | | } |
| | | } else { |
| | | // Should never happen -- the keys should always be there. |
| | | throw new RuntimeException(); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Insert the specified import ID set into this index. Creates a DB cursor if needed. |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @param importIdSet The set of import IDs. |
| | | * @throws StorageRuntimeException If a database error occurs. |
| | | */ |
| | | final void insert(WriteableTransaction txn, ImportIDSet importIdSet) throws StorageRuntimeException |
| | | { |
| | | ByteSequence key = importIdSet.getKey(); |
| | | ByteString value = txn.read(getName(), key); |
| | | if(value != null) { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | if (importIDSet.merge(importIdSet)) { |
| | | entryLimitExceededCount++; |
| | | } |
| | | value = importIDSet.valueToByteString(codec); |
| | | } else { |
| | | if(!importIdSet.isDefined()) { |
| | | entryLimitExceededCount++; |
| | | } |
| | | value = importIdSet.valueToByteString(codec); |
| | | } |
| | | txn.put(getName(), key, value); |
| | | } |
| | | |
  /**
   * Applies buffered changes for {@code key}: removes {@code deletedIDs} from and adds
   * {@code addedIDs} to the stored entry ID set. Passing null for both arguments deletes
   * the record outright.
   *
   * @param txn a non null database transaction
   * @param key the index key being updated
   * @param deletedIDs entry IDs to remove, may be null
   * @param addedIDs entry IDs to add, may be null
   * @throws StorageRuntimeException if a database error occurs
   */
  void updateKey(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs)
      throws StorageRuntimeException
  {
    /*
     * Check the special condition where both deletedIDs and addedIDs are null. This is used when
     * deleting entries and corresponding id2children and id2subtree records must be completely
     * removed.
     */
    if (deletedIDs == null && addedIDs == null)
    {
      boolean success = txn.delete(getName(), key);
      // NOTE(review): this traces when the delete SUCCEEDED, yet the message says the key
      // does not exist -- the condition may be inverted; confirm intent.
      if (success && logger.isTraceEnabled())
      {
        StringBuilder builder = new StringBuilder();
        StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
        logger.trace("The expected key does not exist in the index %s.\nKey:%s ", getName(), builder);
      }
      return;
    }

    // Handle cases where nothing is changed early to avoid DB access.
    if (isNullOrEmpty(deletedIDs) && isNullOrEmpty(addedIDs))
    {
      return;
    }

    if (maintainCount)
    {
      // Counts must stay accurate, so always read-modify-write.
      updateKeyWithRMW(txn, key, deletedIDs, addedIDs);
    }
    else
    {
      /*
       * Avoid taking a write lock on a record which has hit all IDs because it is likely to be a
       * point of contention.
       */
      ByteString value = txn.read(getName(), key);
      if (value != null)
      {
        EntryIDSet entryIDSet = codec.decode(key, value);
        if (entryIDSet.isDefined())
        {
          updateKeyWithRMW(txn, key, deletedIDs, addedIDs);
        } // else the record exists but we've hit all IDs.
      }
      else if (trusted)
      {
        /*
         * The key was not present, but we cannot simply add it because another thread may have
         * added since.
         */
        updateKeyWithRMW(txn, key, deletedIDs, addedIDs);
      }
    }
  }
| | | |
| | | private boolean isNullOrEmpty(EntryIDSet entryIDSet) |
| | | { |
| | | return entryIDSet == null || entryIDSet.size() == 0; |
| | | } |
| | | |
| | | private boolean isNotEmpty(EntryIDSet entryIDSet) |
| | | { |
| | | return entryIDSet != null && entryIDSet.size() > 0; |
| | | } |
| | | |
  /**
   * Atomically read-modify-writes the entry ID set stored under {@code key} via the
   * storage's update callback: applies the buffered deletions and additions, and removes
   * the record when the resulting encoded value is empty.
   */
  private void updateKeyWithRMW(final WriteableTransaction txn, final ByteString key, final EntryIDSet deletedIDs,
      final EntryIDSet addedIDs) throws StorageRuntimeException
  {
    txn.update(getName(), key, new UpdateFunction()
    {
      @Override
      public ByteSequence computeNewValue(final ByteSequence oldValue)
      {
        if (oldValue != null)
        {
          EntryIDSet entryIDSet = computeEntryIDSet(key, oldValue.toByteString(), deletedIDs, addedIDs);
          ByteString after = codec.encode(entryIDSet);
          /*
           * If there are no more IDs then return null indicating that the record should be removed.
           * If index is not trusted then this will cause all subsequent reads for this key to
           * return undefined set.
           */
          return after.isEmpty() ? null : after;
        }
        else if (trusted)
        {
          // A trusted index should already contain any key we try to delete from;
          // a missing key here means the index is corrupt.
          if (deletedIDs != null)
          {
            logIndexCorruptError(txn, key);
          }
          if (isNotEmpty(addedIDs))
          {
            return codec.encode(addedIDs);
          }
        }
        return null; // no change.
      }
    });
  }
| | | |
  /**
   * Computes the new entry ID set for {@code key} from the currently stored value plus
   * the buffered additions and deletions, honouring the configured index entry limit.
   *
   * @return the resulting set; undefined when the entry limit was exceeded
   */
  private EntryIDSet computeEntryIDSet(ByteString key, ByteString value, EntryIDSet deletedIDs, EntryIDSet addedIDs)
  {
    EntryIDSet entryIDSet = codec.decode(key, value);
    if(addedIDs != null)
    {
      if(entryIDSet.isDefined() && indexEntryLimit > 0)
      {
        // Net change in the number of IDs once adds and deletes are applied.
        long idCountDelta = addedIDs.size();
        if(deletedIDs != null)
        {
          idCountDelta -= deletedIDs.size();
        }
        if(idCountDelta + entryIDSet.size() >= indexEntryLimit)
        {
          // Entry limit reached: collapse to an undefined set, keeping a
          // running count only when maintainCount is enabled.
          if(maintainCount)
          {
            entryIDSet = newUndefinedSetWithSize(key, entryIDSet.size() + idCountDelta);
          }
          else
          {
            entryIDSet = newUndefinedSet();
          }
          entryLimitExceededCount++;

          if(logger.isTraceEnabled())
          {
            StringBuilder builder = new StringBuilder();
            StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4);
            // NOTE(review): "ID list size" is idCountDelta + addedIDs.size(), which counts
            // the additions twice -- confirm whether the resulting set size was intended.
            logger.trace("Index entry exceeded in index %s. " +
                "Limit: %d. ID list size: %d.\nKey:%s",
                getName(), indexEntryLimit, idCountDelta + addedIDs.size(), builder);

          }
        }
        else
        {
          entryIDSet.addAll(addedIDs);
          if(deletedIDs != null)
          {
            entryIDSet.removeAll(deletedIDs);
          }
        }
      }
      else
      {
        // Set is already undefined or no limit is configured: just apply the changes.
        entryIDSet.addAll(addedIDs);
        if(deletedIDs != null)
        {
          entryIDSet.removeAll(deletedIDs);
        }
      }
    }
    else if(deletedIDs != null)
    {
      entryIDSet.removeAll(deletedIDs);
    }
    return entryIDSet;
  }
| | | |
  /**
   * Buffers a removal of the given entry ID from the given key; nothing is
   * written to storage until the index buffer is flushed.
   */
  final void removeID(IndexBuffer buffer, ByteString keyBytes, EntryID entryID)
  {
    getBufferedIndexValues(buffer, keyBytes).deleteEntryID(keyBytes, entryID);
  }
| | | |
| | | private void logIndexCorruptError(WriteableTransaction txn, ByteString key) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | StringBuilder builder = new StringBuilder(); |
| | | StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4); |
| | | logger.trace("The expected key does not exist in the index %s.\nKey:%s", getName(), builder); |
| | | } |
| | | |
| | | setTrusted(txn, false); |
| | | logger.error(ERR_JEB_INDEX_CORRUPT_REQUIRES_REBUILD, getName()); |
| | | } |
| | | |
  /**
   * Buffers a deletion for the given key. Merely fetching the buffered values
   * materializes a change record for the key in the buffer -- presumably so the
   * key is processed when the buffer is flushed; verify against IndexBuffer.
   */
  void delete(IndexBuffer buffer, ByteString keyBytes)
  {
    getBufferedIndexValues(buffer, keyBytes);
  }
| | | |
  /** Returns the buffered change record for this index and the given key. */
  private BufferedIndexValues getBufferedIndexValues(IndexBuffer buffer, ByteString keyBytes)
  {
    return buffer.getBufferedIndexValues(this, keyBytes);
  }
| | | |
| | | /** |
| | | * Check if an entry ID is in the set of IDs indexed by a given key. |
| | | * |
| | | * @param txn |
| | | * A database transaction. |
| | | * @param key |
| | | * The index key. |
| | | * @param entryID |
| | | * The entry ID. |
| | | * @return true if the entry ID is indexed by the given key, false if it is not indexed by the |
| | | * given key, undefined if the key has exceeded the entry limit. |
| | | * @throws StorageRuntimeException |
| | | * If an error occurs in the database. |
| | | */ |
| | | ConditionResult containsID(ReadableTransaction txn, ByteString key, EntryID entryID) |
| | | throws StorageRuntimeException |
| | | { |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | EntryIDSet entryIDSet = codec.decode(key, value); |
| | | if (entryIDSet.isDefined()) |
| | | { |
| | | return ConditionResult.valueOf(entryIDSet.contains(entryID)); |
| | | } |
| | | return ConditionResult.UNDEFINED; |
| | | } |
| | | return trusted ? ConditionResult.FALSE : ConditionResult.UNDEFINED; |
| | | } |
| | | |
| | | /** |
| | | * Reads the value associated to a key. |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @param key The key to read |
| | | * @return The non null set of entry IDs. |
| | | */ |
| | | EntryIDSet read(ReadableTransaction txn, ByteSequence key) |
| | | { |
| | | try |
| | | { |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | return codec.decode(key, value); |
| | | } |
| | | return trusted ? newDefinedSet() : newUndefinedSet(); |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | return newUndefinedSet(); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Reads a range of keys and collects all their entry IDs into a |
| | | * single set. |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @param lower The lower bound of the range. A 0 length byte array indicates |
| | | * no lower bound and the range will start from the |
| | | * smallest key. |
| | | * @param upper The upper bound of the range. A 0 length byte array indicates |
| | | * no upper bound and the range will end at the largest |
| | | * key. |
| | | * @param lowerIncluded true if a key exactly matching the lower bound |
| | | * is included in the range, false if only keys |
| | | * strictly greater than the lower bound are included. |
| | | * This value is ignored if the lower bound is not |
| | | * specified. |
| | | * @param upperIncluded true if a key exactly matching the upper bound |
| | | * is included in the range, false if only keys |
| | | * strictly less than the upper bound are included. |
| | | * This value is ignored if the upper bound is not |
| | | * specified. |
| | | * @return The non null set of entry IDs. |
| | | */ |
| | | EntryIDSet readRange(ReadableTransaction txn, |
| | | ByteSequence lower, ByteSequence upper, boolean lowerIncluded, boolean upperIncluded) |
| | | { |
| | | // If this index is not trusted, then just return an undefined id set. |
| | | if (!trusted) |
| | | { |
| | | return newUndefinedSet(); |
| | | } |
| | | |
| | | try |
| | | { |
| | | // Total number of IDs found so far. |
| | | int totalIDCount = 0; |
| | | |
| | | ArrayList<EntryIDSet> sets = new ArrayList<EntryIDSet>(); |
| | | |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(getName()); |
| | | try |
| | | { |
| | | boolean success; |
| | | // Set the lower bound if necessary. |
| | | if (lower.length() > 0) |
| | | { |
| | | // Initialize the cursor to the lower bound. |
| | | success = cursor.positionToKeyOrNext(lower); |
| | | |
| | | // Advance past the lower bound if necessary. |
| | | if (success && !lowerIncluded && cursor.getKey().equals(lower)) |
| | | { |
| | | // Do not include the lower value. |
| | | success = cursor.next(); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | success = cursor.next(); |
| | | } |
| | | |
| | | if (!success) |
| | | { |
| | | // There are no values. |
| | | return newDefinedSet(); |
| | | } |
| | | |
| | | // Step through the keys until we hit the upper bound or the last key. |
| | | while (success) |
| | | { |
| | | // Check against the upper bound if necessary |
| | | if (upper.length() > 0) |
| | | { |
| | | int cmp = cursor.getKey().compareTo(upper); |
| | | if (cmp > 0 || (cmp == 0 && !upperIncluded)) |
| | | { |
| | | break; |
| | | } |
| | | } |
| | | |
| | | EntryIDSet set = codec.decode(cursor.getKey(), cursor.getValue()); |
| | | if (!set.isDefined()) |
| | | { |
| | | // There is no point continuing. |
| | | return set; |
| | | } |
| | | totalIDCount += set.size(); |
| | | if (cursorEntryLimit > 0 && totalIDCount > cursorEntryLimit) |
| | | { |
| | | // There are too many. Give up and return an undefined list. |
| | | return newUndefinedSet(); |
| | | } |
| | | sets.add(set); |
| | | success = cursor.next(); |
| | | } |
| | | |
| | | return newSetFromUnion(sets); |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | return newUndefinedSet(); |
| | | } |
| | | } |
| | | |
  /** Returns the number of keys that have exceeded the entry limit since this object was created. */
  int getEntryLimitExceededCount()
  {
    return entryLimitExceededCount;
  }
| | | |
| | | void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options) |
| | | throws StorageRuntimeException |
| | | { |
| | | HashSet<ByteString> addKeys = new HashSet<ByteString>(); |
| | | indexer.indexEntry(entry, addKeys, options); |
| | | |
| | | for (ByteString keyBytes : addKeys) |
| | | { |
| | | insertID(buffer, keyBytes, entryID); |
| | | } |
| | | } |
| | | |
| | | void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options) |
| | | throws StorageRuntimeException |
| | | { |
| | | HashSet<ByteString> delKeys = new HashSet<ByteString>(); |
| | | indexer.indexEntry(entry, delKeys, options); |
| | | |
| | | for (ByteString keyBytes : delKeys) |
| | | { |
| | | removeID(buffer, keyBytes, entryID); |
| | | } |
| | | } |
| | | |
| | | void modifyEntry(IndexBuffer buffer, EntryID entryID, Entry oldEntry, Entry newEntry, List<Modification> mods, |
| | | IndexingOptions options) throws StorageRuntimeException |
| | | { |
| | | TreeMap<ByteString, Boolean> modifiedKeys = new TreeMap<ByteString, Boolean>(); |
| | | indexer.modifyEntry(oldEntry, newEntry, mods, modifiedKeys, options); |
| | | |
| | | for (Map.Entry<ByteString, Boolean> modifiedKey : modifiedKeys.entrySet()) |
| | | { |
| | | if(modifiedKey.getValue()) |
| | | { |
| | | insertID(buffer, modifiedKey.getKey(), entryID); |
| | | } |
| | | else |
| | | { |
| | | removeID(buffer, modifiedKey.getKey(), entryID); |
| | | } |
| | | } |
| | | } |
| | | |
  /**
   * Sets a new index entry limit.
   * <p>
   * A rebuild is reported as required when the limit is being raised while
   * some keys have already exceeded the previous limit — presumably because
   * their ID lists were made undefined and the discarded IDs can only be
   * recovered by rebuilding the index.
   *
   * @param indexEntryLimit the new index entry limit
   * @return {@code true} if the index must be rebuilt to be consistent with
   *         the new limit, {@code false} otherwise
   */
  boolean setIndexEntryLimit(int indexEntryLimit)
  {
    final boolean rebuildRequired = this.indexEntryLimit < indexEntryLimit && entryLimitExceededCount > 0;
    this.indexEntryLimit = indexEntryLimit;
    return rebuildRequired;
  }
| | | |
  /**
   * Replaces the indexer used to compute keys for this index.
   *
   * @param indexer the new indexer
   */
  final void setIndexer(Indexer indexer)
  {
    this.indexer = indexer;
  }
| | | |
  /**
   * Returns the current index entry limit (the maximum number of entry IDs
   * kept per key before the ID list becomes undefined).
   *
   * @return the index entry limit
   */
  int getIndexEntryLimit()
  {
    return indexEntryLimit;
  }
| | | |
| | | synchronized void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException |
| | | { |
| | | this.trusted = trusted; |
| | | if (trusted) { |
| | | state.addFlagsToIndex(txn, getName(), TRUSTED); |
| | | } else { |
| | | state.removeFlagsFromIndex(txn, getName(), TRUSTED); |
| | | } |
| | | } |
| | | |
  /**
   * Indicates whether this index is currently trusted, i.e. believed to be
   * consistent with the entry database.
   *
   * @return {@code true} if the index is trusted
   */
  synchronized boolean isTrusted()
  {
    return trusted;
  }
| | | |
  /**
   * Indicates whether a rebuild of this index is in progress.
   * <p>
   * Currently always {@code false}; the FIXME below suggests callers could
   * simply inline this constant.
   *
   * @return always {@code false}
   */
  synchronized boolean isRebuildRunning()
  {
    return false; // FIXME inline?
  }
| | | |
  /**
   * Indicates whether this index maintains a count of IDs per key.
   *
   * @return {@code true} if per-key counts are maintained
   */
  boolean getMaintainCount()
  {
    return maintainCount;
  }
| | | void update(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs); |
| | | } |
| | |
| | | * state is only ever used when updating the id2children and id2subtree indexes when deleting an |
| | | * entry. |
| | | */ |
| | | static class BufferedIndexValues |
| | | private static class BufferedIndexValues |
| | | { |
| | | private EntryIDSet addedIDs; |
| | | private EntryIDSet deletedIDs; |
| | | private EntryIDSet addedEntryIDs; |
| | | private EntryIDSet deletedEntryIDs; |
| | | |
| | | /** |
| | | * Adds the provided entryID to this object associating it with the provided keyBytes. |
| | | * |
| | | * @param keyBytes the keyBytes mapping for this entryID |
| | | * @param entryID the entryID to add |
| | | */ |
| | | void addEntryID(ByteString keyBytes, EntryID entryID) |
| | | void addEntryID(EntryID entryID) |
| | | { |
| | | if (!remove(deletedIDs, entryID)) |
| | | if (!remove(deletedEntryIDs, entryID)) |
| | | { |
| | | if (this.addedIDs == null) |
| | | if (this.addedEntryIDs == null) |
| | | { |
| | | this.addedIDs = newDefinedSet(); |
| | | this.addedEntryIDs = newDefinedSet(); |
| | | } |
| | | this.addedIDs.add(entryID); |
| | | this.addedEntryIDs.add(entryID); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Deletes the provided entryID from this object. |
| | | * |
| | | * @param keyBytes the keyBytes mapping for this entryID |
| | | * @param entryID the entryID to delete |
| | | */ |
| | | void deleteEntryID(ByteString keyBytes, EntryID entryID) |
| | | void deleteEntryID(EntryID entryID) |
| | | { |
| | | if (!remove(addedIDs, entryID)) |
| | | if (!remove(addedEntryIDs, entryID)) |
| | | { |
| | | if (this.deletedIDs == null) |
| | | if (this.deletedEntryIDs == null) |
| | | { |
| | | this.deletedIDs = newDefinedSet(); |
| | | this.deletedEntryIDs = newDefinedSet(); |
| | | } |
| | | this.deletedIDs.add(entryID); |
| | | this.deletedEntryIDs.add(entryID); |
| | | } |
| | | } |
| | | |
| | | private boolean remove(EntryIDSet ids, EntryID entryID) |
| | | private static boolean remove(EntryIDSet entryIDs, EntryID entryID) |
| | | { |
| | | if (ids != null && ids.contains(entryID)) |
| | | { |
| | | ids.remove(entryID); |
| | | return true; |
| | | } |
| | | return false; |
| | | return entryIDs != null ? entryIDs.remove(entryID) : false; |
| | | } |
| | | } |
| | | |
| | | /** A simple class representing a pair of added and deleted VLV values. */ |
| | | static class BufferedVLVIndexValues |
| | | private static class BufferedVLVIndexValues |
| | | { |
| | | private TreeSet<ByteString> addedValues; |
| | | private TreeSet<ByteString> deletedValues; |
| | | private TreeSet<ByteString> addedSortKeys; |
| | | private TreeSet<ByteString> deletedSortKeys; |
| | | |
| | | /** |
| | | * Adds the provided values to this object. |
| | | * |
| | | * @param sortValues the values to add |
| | | */ |
| | | void addValues(ByteString sortValues) |
| | | void addSortKey(ByteString sortKey) |
| | | { |
| | | if (!remove(deletedValues, sortValues)) |
| | | if (!remove(deletedSortKeys, sortKey)) |
| | | { |
| | | if (addedValues == null) |
| | | if (addedSortKeys == null) |
| | | { |
| | | addedValues = new TreeSet<ByteString>(); |
| | | addedSortKeys = new TreeSet<ByteString>(); |
| | | } |
| | | addedValues.add(sortValues); |
| | | addedSortKeys.add(sortKey); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Deletes the provided values from this object. |
| | | * |
| | | * @param sortValues the values to delete |
| | | */ |
| | | void deleteValues(ByteString sortValues) |
| | | void deleteSortKey(ByteString sortKey) |
| | | { |
| | | if (!remove(addedValues, sortValues)) |
| | | if (!remove(addedSortKeys, sortKey)) |
| | | { |
| | | if (deletedValues == null) |
| | | if (deletedSortKeys == null) |
| | | { |
| | | deletedValues = new TreeSet<ByteString>(); |
| | | deletedSortKeys = new TreeSet<ByteString>(); |
| | | } |
| | | deletedValues.add(sortValues); |
| | | deletedSortKeys.add(sortKey); |
| | | } |
| | | } |
| | | |
| | | private boolean remove(TreeSet<ByteString> values, ByteString sortValues) |
| | | private static boolean remove(TreeSet<ByteString> sortKeys, ByteString sortKey) |
| | | { |
| | | if (values != null && values.contains(sortValues)) |
| | | { |
| | | values.remove(sortValues); |
| | | return true; |
| | | } |
| | | return false; |
| | | return sortKeys != null ? sortKeys.remove(sortKey) : false; |
| | | } |
| | | } |
| | | |
| | |
| | | this.entryContainer = entryContainer; |
| | | } |
| | | |
| | | /** |
| | | * Get the buffered VLV values for the given VLV index. |
| | | * |
| | | * @param vlvIndex The VLV index with the buffered values to retrieve. |
| | | * @return The buffered VLV values or <code>null</code> if there are |
| | | * no buffered VLV values for the specified VLV index. |
| | | */ |
| | | BufferedVLVIndexValues getBufferedVLVIndexValues(VLVIndex vlvIndex) |
| | | private BufferedVLVIndexValues createOrGetBufferedVLVIndexValues(VLVIndex vlvIndex) |
| | | { |
| | | BufferedVLVIndexValues bufferedValues = bufferedVLVIndexes.get(vlvIndex); |
| | | if (bufferedValues == null) |
| | |
| | | return bufferedValues; |
| | | } |
| | | |
| | | /** |
| | | * Get the buffered index values for the given index and keyBytes. |
| | | * |
| | | * @param index |
| | | * The index for which to retrieve the buffered index values |
| | | * @param keyBytes |
| | | * The keyBytes for which to retrieve the buffered index values |
| | | * @return The buffered index values, it can never be null |
| | | */ |
| | | BufferedIndexValues getBufferedIndexValues(Index index, ByteString keyBytes) |
| | | private BufferedIndexValues createOrGetBufferedIndexValues(Index index, ByteString keyBytes) |
| | | { |
| | | BufferedIndexValues values = null; |
| | | |
| | |
| | | { |
| | | for (Index index : attributeIndex.getAllIndexes()) |
| | | { |
| | | updateKeys(index, txn, bufferedIndexes.remove(index)); |
| | | flushIndex(index, txn, bufferedIndexes.remove(index)); |
| | | } |
| | | } |
| | | |
| | |
| | | BufferedVLVIndexValues bufferedVLVValues = bufferedVLVIndexes.remove(vlvIndex); |
| | | if (bufferedVLVValues != null) |
| | | { |
| | | vlvIndex.updateIndex(txn, bufferedVLVValues.addedValues, bufferedVLVValues.deletedValues); |
| | | vlvIndex.updateIndex(txn, bufferedVLVValues.addedSortKeys, bufferedVLVValues.deletedSortKeys); |
| | | } |
| | | } |
| | | |
| | | final Index id2children = entryContainer.getID2Children(); |
| | | updateKeys(id2children, txn, bufferedIndexes.remove(id2children)); |
| | | flushIndex(id2children, txn, bufferedIndexes.remove(id2children)); |
| | | |
| | | final Index id2subtree = entryContainer.getID2Subtree(); |
| | | final TreeMap<ByteString, BufferedIndexValues> bufferedValues = bufferedIndexes.remove(id2subtree); |
| | |
| | | * entry processing in add/delete processing. This is necessary in order |
| | | * to avoid deadlocks. |
| | | */ |
| | | updateKeys(id2subtree, txn, bufferedValues.descendingMap()); |
| | | flushIndex(id2subtree, txn, bufferedValues.descendingMap()); |
| | | } |
| | | } |
| | | |
| | | private void updateKeys(Index index, WriteableTransaction txn, |
| | | void put(Index index, ByteString key, EntryID entryID) |
| | | { |
| | | createOrGetBufferedIndexValues(index, key).addEntryID(entryID); |
| | | } |
| | | |
| | | void put(VLVIndex index, ByteString sortKey) |
| | | { |
| | | createOrGetBufferedVLVIndexValues(index).addSortKey(sortKey); |
| | | } |
| | | |
| | | void remove(VLVIndex index, ByteString sortKey) |
| | | { |
| | | createOrGetBufferedVLVIndexValues(index).deleteSortKey(sortKey); |
| | | } |
| | | |
  /**
   * Registers the provided key of the given index so that it will be
   * visited at flush time even though no entry IDs are added or removed.
   * <p>
   * NOTE(review): the returned buffered-values object is deliberately
   * discarded — creating the buffered entry is the side effect that causes
   * the flush to call {@code index.update()} for this key. Confirm this is
   * intended rather than a missing {@code deleteEntryID()} call.
   */
  void remove(Index index, ByteString key)
  {
    createOrGetBufferedIndexValues(index, key);
  }
| | | |
| | | void remove(Index index, ByteString key, EntryID entryID) |
| | | { |
| | | createOrGetBufferedIndexValues(index, key).deleteEntryID(entryID); |
| | | } |
| | | |
| | | private void flushIndex(Index index, WriteableTransaction txn, |
| | | Map<ByteString, BufferedIndexValues> bufferedValues) |
| | | { |
| | | if (bufferedValues != null) |
| | |
| | | final Map.Entry<ByteString, BufferedIndexValues> entry = it.next(); |
| | | final ByteString key = entry.getKey(); |
| | | final BufferedIndexValues values = entry.getValue(); |
| | | |
| | | index.updateKey(txn, key, values.deletedIDs, values.addedIDs); |
| | | |
| | | index.update(txn, key, values.deletedEntryIDs, values.addedEntryIDs); |
| | | it.remove(); |
| | | } |
| | | } |
| | |
| | | */ |
| | | private EntryIDSet evaluateFilter(SearchFilter filter) |
| | | { |
| | | EntryIDSet candidates = evaluate(filter); |
| | | EntryIDSet candidates = evaluateFilter0(filter); |
| | | if (buffer != null) |
| | | { |
| | | candidates.toString(buffer); |
| | |
| | | return candidates; |
| | | } |
| | | |
| | | private EntryIDSet evaluate(SearchFilter filter) |
| | | private EntryIDSet evaluateFilter0(SearchFilter filter) |
| | | { |
| | | switch (filter.getFilterType()) |
| | | { |
| | |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.newUndefinedSet; |
| | | |
| | | import java.util.ArrayList; |
| | | import java.util.Collection; |
| | | |
| | | import org.forgerock.i18n.LocalizableMessageBuilder; |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.spi.IndexQueryFactory; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | |
| | | /** |
| | | * This class is an implementation of IndexQueryFactory which creates |
| | |
| | | */ |
| | | final class IndexQueryFactoryImpl implements IndexQueryFactory<IndexQuery> |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | |
| | | private static final String PRESENCE_INDEX_KEY = "presence"; |
| | | |
| | |
| | | return createMatchAllQuery().evaluate(debugMessage); |
| | | } |
| | | |
| | | final EntryIDSet entrySet = index.read(txn, key); |
| | | final EntryIDSet entrySet = index.get(txn, key); |
| | | if (debugMessage != null && !entrySet.isDefined()) |
| | | { |
| | | updateStatsUndefinedResults(debugMessage, index); |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public IndexQuery createRangeMatchQuery(final String indexID, |
| | | final ByteSequence lowerBound, final ByteSequence upperBound, |
| | | final boolean includeLowerBound, final boolean includeUpperBound) |
| | | public IndexQuery createRangeMatchQuery(final String indexID, final ByteSequence lowerBound, |
| | | final ByteSequence upperBound, final boolean includeLowerBound, final boolean includeUpperBound) |
| | | { |
| | | return new IndexQuery() |
| | | { |
| | | @Override |
| | | public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage) |
| | | { |
| | | @Override |
| | | public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage) |
| | | // Find the right index. |
| | | final Index index = attributeIndex.getIndexById(indexID); |
| | | if (index == null) |
| | | { |
| | | // Find the right index. |
| | | final Index index = attributeIndex.getIndexById(indexID); |
| | | if (index == null) |
| | | if (debugMessage != null) |
| | | { |
| | | if(debugMessage != null) |
| | | { |
| | | debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_TYPE_DISABLED.get(indexID, "")); |
| | | } |
| | | return createMatchAllQuery().evaluate(debugMessage); |
| | | debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_TYPE_DISABLED.get(indexID, "")); |
| | | } |
| | | |
| | | final EntryIDSet entrySet = index.readRange(txn, lowerBound, upperBound, |
| | | includeLowerBound, includeUpperBound); |
| | | if(debugMessage != null && !entrySet.isDefined()) |
| | | { |
| | | updateStatsUndefinedResults(debugMessage, index); |
| | | } |
| | | return entrySet; |
| | | return createMatchAllQuery().evaluate(debugMessage); |
| | | } |
| | | |
| | | final EntryIDSet entrySet = readRange(index, txn, lowerBound, upperBound, includeLowerBound, includeUpperBound); |
| | | if (debugMessage != null && !entrySet.isDefined()) |
| | | { |
| | | updateStatsUndefinedResults(debugMessage, index); |
| | | } |
| | | return entrySet; |
| | | } |
| | | |
| | | private final EntryIDSet readRange(Index index, ReadableTransaction txn, ByteSequence lower, ByteSequence upper, |
| | | boolean lowerIncluded, boolean upperIncluded) |
| | | { |
| | | // If this index is not trusted, then just return an undefined id set. |
| | | if (!index.isTrusted()) |
| | | { |
| | | return newUndefinedSet(); |
| | | } |
| | | |
| | | try |
| | | { |
| | | // Total number of IDs found so far. |
| | | int totalIDCount = 0; |
| | | ArrayList<EntryIDSet> sets = new ArrayList<EntryIDSet>(); |
| | | Cursor<ByteString, EntryIDSet> cursor = index.openCursor(txn); |
| | | try |
| | | { |
| | | boolean success; |
| | | // Set the lower bound if necessary. |
| | | if (lower.length() > 0) |
| | | { |
| | | // Initialize the cursor to the lower bound. |
| | | success = cursor.positionToKeyOrNext(lower); |
| | | |
| | | // Advance past the lower bound if necessary. |
| | | if (success && !lowerIncluded && cursor.getKey().equals(lower)) |
| | | { |
| | | // Do not include the lower value. |
| | | success = cursor.next(); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | success = cursor.next(); |
| | | } |
| | | |
| | | if (!success) |
| | | { |
| | | // There are no values. |
| | | return EntryIDSet.newDefinedSet(); |
| | | } |
| | | |
| | | // Step through the keys until we hit the upper bound or the last key. |
| | | while (success) |
| | | { |
| | | // Check against the upper bound if necessary |
| | | if (upper.length() > 0) |
| | | { |
| | | int cmp = cursor.getKey().compareTo(upper); |
| | | if (cmp > 0 || (cmp == 0 && !upperIncluded)) |
| | | { |
| | | break; |
| | | } |
| | | } |
| | | |
| | | EntryIDSet set = cursor.getValue(); |
| | | if (!set.isDefined()) |
| | | { |
| | | // There is no point continuing. |
| | | return set; |
| | | } |
| | | totalIDCount += set.size(); |
| | | if (totalIDCount > IndexFilter.CURSOR_ENTRY_LIMIT) |
| | | { |
| | | // There are too many. Give up and return an undefined list. |
| | | return newUndefinedSet(); |
| | | } |
| | | sets.add(set); |
| | | success = cursor.next(); |
| | | } |
| | | |
| | | return EntryIDSet.newSetFromUnion(sets); |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | return newUndefinedSet(); |
| | | } |
| | | } |
| | | |
| | | @Override |
| | | public String toString() |
| | | { |
| | |
| | | return newUndefinedSet(); |
| | | } |
| | | |
| | | final EntryIDSet entrySet = index.read(txn, PresenceIndexer.presenceKey); |
| | | final EntryIDSet entrySet = index.get(txn, AttributeIndex.PRESENCE_KEY); |
| | | if (debugMessage != null && !entrySet.isDefined()) |
| | | { |
| | | updateStatsUndefinedResults(debugMessage, index); |
| | |
| | | { |
| | | debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_NOT_TRUSTED.get(index.getName())); |
| | | } |
| | | else if (index.isRebuildRunning()) |
| | | { |
| | | debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_REBUILD_IN_PROGRESS.get(index.getName())); |
| | | } |
| | | else |
| | | { |
| | | debugMessage.append(INFO_JEB_INDEX_FILTER_INDEX_LIMIT_EXCEEDED.get(index.getName())); |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.*; |
| | | |
| | | import java.util.List; |
| | | import java.util.Set; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.newUndefinedSet; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ConditionResult; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.types.Entry; |
| | | import org.opends.server.types.Modification; |
| | | |
| | | /** |
| | | * A null index which replaces id2children and id2subtree when they have been |
| | | * disabled. |
| | | * A null index which replaces id2children and id2subtree when they have been disabled. |
| | | */ |
| | | final class NullIndex extends Index |
| | | final class NullIndex implements Index |
| | | { |
| | | private final TreeName name; |
| | | |
| | | NullIndex(TreeName name, Indexer indexer, State state, WriteableTransaction txn, |
| | | EntryContainer entryContainer) throws StorageRuntimeException |
| | | NullIndex(TreeName name) |
| | | { |
| | | super(name, indexer, state, 0, 0, false, txn, entryContainer); |
| | | state.removeFlagsFromIndex(txn, name, IndexFlag.TRUSTED); |
| | | super.delete(txn); |
| | | this.name = name; |
| | | } |
| | | |
| | | @Override |
| | | void updateKey(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs) |
| | | public void update(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs) |
| | | throws StorageRuntimeException |
| | | { |
| | | // Do nothing. |
| | | } |
| | | |
  /** No-op: there is nothing buffered to delete for a disabled index. */
  @Override
  void delete(IndexBuffer buffer, ByteString keyBytes)
  {
    // Do nothing.
  }
| | | |
  /**
   * Always {@code UNDEFINED}: a disabled index holds no data, so membership
   * of an entry ID cannot be determined.
   */
  @Override
  ConditionResult containsID(ReadableTransaction txn, ByteString key, EntryID entryID)
      throws StorageRuntimeException
  {
    return ConditionResult.UNDEFINED;
  }
| | | |
| | | @Override |
| | | EntryIDSet read(ReadableTransaction txn, ByteSequence key) |
| | | public EntryIDSet get(ReadableTransaction txn, ByteSequence key) |
| | | { |
| | | return newUndefinedSet(); |
| | | } |
| | | |
  /** Always returns an undefined set: range reads cannot be served by a disabled index. */
  @Override
  EntryIDSet readRange(ReadableTransaction txn, ByteSequence lower, ByteSequence upper, boolean lowerIncluded,
      boolean upperIncluded)
  {
    return newUndefinedSet();
  }
| | | |
  /** Always 0: a disabled index never exceeds its entry limit. */
  @Override
  int getEntryLimitExceededCount()
  {
    return 0;
  }
| | | |
  /** No-op: entry additions are not indexed by a disabled index. */
  @Override
  void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options)
      throws StorageRuntimeException
  {
    // Do nothing.
  }
| | | |
  /** No-op: entry removals are not indexed by a disabled index. */
  @Override
  void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry, IndexingOptions options)
      throws StorageRuntimeException
  {
    // Do nothing.
  }
| | | |
  /** No-op: entry modifications are not indexed by a disabled index. */
  @Override
  void modifyEntry(IndexBuffer buffer, EntryID entryID, Entry oldEntry, Entry newEntry, List<Modification> mods,
      IndexingOptions options) throws StorageRuntimeException
  {
    // Do nothing.
  }
| | | |
| | | @Override |
| | | boolean setIndexEntryLimit(int indexEntryLimit) |
| | | public boolean setIndexEntryLimit(int indexEntryLimit) |
| | | { |
| | | return false; |
| | | } |
| | | |
| | | @Override |
| | | int getIndexEntryLimit() |
| | | public int getIndexEntryLimit() |
| | | { |
| | | return 0; |
| | | } |
| | | |
| | | @Override |
| | | void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException |
| | | public void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException |
| | | { |
| | | // Do nothing. |
| | | } |
| | | |
| | | @Override |
| | | boolean isTrusted() |
| | | public boolean isTrusted() |
| | | { |
| | | return true; |
| | | } |
| | | |
| | | @Override |
| | | boolean isRebuildRunning() |
| | | public boolean getMaintainCount() |
| | | { |
| | | return false; |
| | | } |
| | | |
| | | @Override |
| | | boolean getMaintainCount() |
| | | { |
| | | return false; |
| | | } |
| | | |
  /** No-op: a disabled index has no underlying tree to open. */
  @Override
  void open(WriteableTransaction txn) throws StorageRuntimeException
  {
    // Do nothing.
  }
| | | |
| | | @Override |
| | | long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException |
| | | public long getRecordCount(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | return 0; |
| | | } |
| | | |
| | | @Override |
| | | void delete(WriteableTransaction txn) throws StorageRuntimeException |
| | | public Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn) |
| | | { |
| | | return new Cursor<ByteString, EntryIDSet>() |
| | | { |
| | | |
| | | @Override |
| | | public boolean positionToKey(ByteSequence key) |
| | | { |
| | | return false; |
| | | } |
| | | |
| | | @Override |
| | | public boolean positionToKeyOrNext(ByteSequence key) |
| | | { |
| | | return false; |
| | | } |
| | | |
| | | @Override |
| | | public boolean positionToLastKey() |
| | | { |
| | | return false; |
| | | } |
| | | |
| | | @Override |
| | | public boolean positionToIndex(int index) |
| | | { |
| | | return false; |
| | | } |
| | | |
| | | @Override |
| | | public boolean next() |
| | | { |
| | | return false; |
| | | } |
| | | |
| | | @Override |
| | | public ByteString getKey() |
| | | { |
| | | return null; |
| | | } |
| | | |
| | | @Override |
| | | public EntryIDSet getValue() |
| | | { |
| | | return null; |
| | | } |
| | | |
| | | @Override |
| | | public void close() |
| | | { |
| | | // Nothing to do. |
| | | } |
| | | |
| | | }; |
| | | } |
| | | |
  /** No-op: import-time removals do not apply to a disabled index. */
  @Override
  public void importRemove(WriteableTransaction txn, ImportIDSet idsToBeRemoved) throws StorageRuntimeException
  {
    // Do nothing.
  }
| | | |
| | | @Override |
| | | void indexEntry(Entry entry, Set<ByteString> keys, IndexingOptions options) |
| | | public void importPut(WriteableTransaction txn, ImportIDSet idsToBeAdded) throws StorageRuntimeException |
| | | { |
| | | // Do nothing. |
| | | } |
| | | |
  /** Returns the tree name this disabled index stands in for. */
  @Override
  public TreeName getName()
  {
    return name;
  }
| | | |
  /** No-op: a disabled index has no underlying tree to open. */
  @Override
  public void open(WriteableTransaction txn) throws StorageRuntimeException
  {
    // Do nothing.
  }
| | | |
  /** No-op: a disabled index has no underlying tree to delete. */
  @Override
  public void delete(WriteableTransaction txn) throws StorageRuntimeException
  {
    // Do nothing.
  }
| | | |
  /** No-op: the name of a disabled index is fixed at construction. */
  @Override
  public void setName(TreeName name)
  {
    // Do nothing.
  }
| | |
| | | if (timeLimit > 0) |
| | | { |
| | | // Get a list of all the databases used by the backend. |
| | | ArrayList<DatabaseContainer> dbList = new ArrayList<DatabaseContainer>(); |
| | | final List<DatabaseContainer> databases = new ArrayList<DatabaseContainer>(); |
| | | for (EntryContainer ec : entryContainers.values()) |
| | | { |
| | | ec.sharedLock.lock(); |
| | | try |
| | | { |
| | | ec.listDatabases(dbList); |
| | | databases.addAll(ec.listDatabases()); |
| | | } |
| | | finally |
| | | { |
| | |
| | | } |
| | | |
| | | // Sort the list in order of priority. |
| | | Collections.sort(dbList, new DbPreloadComparator()); |
| | | Collections.sort(databases, new DbPreloadComparator()); |
| | | |
| | | // Preload each database until we reach the time limit or the cache |
| | | // is filled. |
| | |
| | | * This class is responsible for storing the configuration state of |
| | | * the JE backend for a particular suffix. |
| | | */ |
| | | class State extends DatabaseContainer |
| | | class State extends AbstractDatabaseContainer |
| | | { |
| | | |
| | | /** |
| | |
| | | * Ensure that the specified flags are not set for the given index |
| | | * @param txn a non null database transaction |
| | | * @param index The index storing the trusted state info. |
| | | * @return The flags of the index |
| | | * @throws NullPointerException if txn, index or flags is null |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | |
| | | import java.util.concurrent.CountDownLatch; |
| | | |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | | import org.opends.server.backends.pluggable.AttributeIndex.MatchingRuleIndex; |
| | | import org.opends.server.backends.pluggable.Importer.DNCache; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | |
| | | setTrusted(txn, attributeIndex.getSubstringIndex(), trusted); |
| | | setTrusted(txn, attributeIndex.getOrderingIndex(), trusted); |
| | | setTrusted(txn, attributeIndex.getApproximateIndex(), trusted); |
| | | Map<String, Collection<Index>> exIndexes = attributeIndex.getExtensibleIndexes(); |
| | | Map<String, Collection<MatchingRuleIndex>> exIndexes = attributeIndex.getExtensibleIndexes(); |
| | | if(!exIndexes.isEmpty()) |
| | | { |
| | | setTrusted(txn, exIndexes.get(EXTENSIBLE_INDEXER_ID_SUBSTRING), trusted); |
| | |
| | | } |
| | | } |
| | | |
| | | private void setTrusted(WriteableTransaction txn, Collection<Index> indexes, boolean trusted) |
| | | private void setTrusted(WriteableTransaction txn, Collection<MatchingRuleIndex> indexes, boolean trusted) |
| | | { |
| | | if (indexes != null) |
| | | { |
| | |
| | | * "tie-breaker" and ensures that keys correspond to one and only one entry. This ensures that all |
| | | * database updates can be performed using lock-free operations. |
| | | */ |
| | | class VLVIndex extends DatabaseContainer implements ConfigurationChangeListener<BackendVLVIndexCfg>, Closeable |
| | | class VLVIndex extends AbstractDatabaseContainer implements ConfigurationChangeListener<BackendVLVIndexCfg>, Closeable |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | |
| | |
| | | } |
| | | |
| | | @Override |
| | | void open(final WriteableTransaction txn) throws StorageRuntimeException |
| | | void doOpen(final WriteableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | super.open(txn); |
| | | count.set((int) txn.getRecordCount(getName())); |
| | | } |
| | | |
| | |
| | | { |
| | | if (shouldInclude(entry)) |
| | | { |
| | | buffer.getBufferedVLVIndexValues(this).addValues(encodeVLVKey(entry, entryID.longValue())); |
| | | buffer.put(this, encodeVLVKey(entry, entryID.longValue())); |
| | | } |
| | | } |
| | | |
| | |
| | | { |
| | | if (shouldInclude(entry)) |
| | | { |
| | | buffer.getBufferedVLVIndexValues(this).deleteValues(encodeVLVKey(entry, entryID.longValue())); |
| | | buffer.remove(this, encodeVLVKey(entry, entryID.longValue())); |
| | | } |
| | | } |
| | | |
| | |
| | | } |
| | | builder.append(separator); |
| | | } |
| | | |
  /**
   * Closes this VLV index, deletes its tree, and removes its state record.
   * <p>
   * NOTE(review): the ordering (close, delete tree, delete state record)
   * appears deliberate — confirm callers rely on the state record being
   * removed last.
   *
   * @param txn a non-null writeable transaction
   */
  void closeAndDelete(WriteableTransaction txn)
  {
    close();
    delete(txn);
    state.deleteRecord(txn, getName());
  }
| | | } |
| | |
| | | import org.forgerock.opendj.ldap.schema.MatchingRule; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.opends.server.backends.VerifyConfig; |
| | | import org.opends.server.backends.pluggable.AttributeIndex.MatchingRuleIndex; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadOperation; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | |
| | | * @param index The index database to be checked. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | private void iterateAttrIndex(ReadableTransaction txn, Index index, IndexingOptions options) |
| | | private void iterateAttrIndex(ReadableTransaction txn, MatchingRuleIndex index, IndexingOptions options) |
| | | throws StorageRuntimeException |
| | | { |
| | | if (index == null) |
| | |
| | | { |
| | | try |
| | | { |
| | | ConditionResult cr = id2c.containsID(txn, parentID.toByteString(), entryID); |
| | | ConditionResult cr = indexContainsID(id2c, txn, parentID.toByteString(), entryID); |
| | | if (cr == ConditionResult.FALSE) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | |
| | | { |
| | | try |
| | | { |
| | | ConditionResult cr = id2s.containsID(txn, id.toByteString(), entryID); |
| | | ConditionResult cr = indexContainsID(id2s, txn, id.toByteString(), entryID); |
| | | if (cr == ConditionResult.FALSE) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | |
| | | |
| | | if (presenceIndex != null) |
| | | { |
| | | verifyAttributeInIndex(presenceIndex, txn, PresenceIndexer.presenceKey, entryID); |
| | | verifyAttributeInIndex(presenceIndex, txn, AttributeIndex.PRESENCE_KEY, entryID); |
| | | } |
| | | |
| | | for (Attribute attr : attrList) |
| | |
| | | { |
| | | try |
| | | { |
| | | ConditionResult cr = index.containsID(txn, key, entryID); |
| | | ConditionResult cr = indexContainsID(index, txn, key, entryID); |
| | | if (cr == ConditionResult.FALSE) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | |
| | | } |
| | | } |
| | | |
| | | private ConditionResult indexContainsID(Index index, ReadableTransaction txn, ByteString key, EntryID entryID) |
| | | { |
| | | EntryIDSet entryIDSet = index.get(txn, key); |
| | | if (entryIDSet.isDefined()) |
| | | { |
| | | return ConditionResult.valueOf(entryIDSet.contains(entryID)); |
| | | } |
| | | return ConditionResult.UNDEFINED; |
| | | } |
| | | |
| | | private ByteString normalize(MatchingRule matchingRule, ByteString value) throws DirectoryException |
| | | { |
| | | try |